ponytail committed on
Commit effc2f1
1 Parent(s): de86215

Create app.py

Files changed (1)
  1. app.py +97 -0
app.py ADDED
@@ -0,0 +1,97 @@
+ import gradio as gr
+ import spaces
+ from transformers import AutoProcessor, LlavaForConditionalGeneration
+ # from qwen_vl_utils import process_vision_info
+ import torch
+ from PIL import Image
+ import subprocess
+ from datetime import datetime
+ import numpy as np
+ import os
+
+ os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
+
+
+ def array_to_image_path(image_array):
+     # Convert numpy array to PIL Image
+     img = Image.fromarray(np.uint8(image_array))
+
+     # Generate a unique filename using timestamp
+     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     filename = f"image_{timestamp}.png"
+
+     # Save the image
+     img.save(filename)
+
+     # Get the full path of the saved image
+     full_path = os.path.abspath(filename)
+
+     return full_path
+
+
+ cuda = 1  # GPU device index used for the model and its inputs
+ model_id = "huangfx1020/human_llama3_8b"
+ models = {
+     "HumanLlaVA-8B": LlavaForConditionalGeneration.from_pretrained(
+         "huangfx1020/human_llama3_8b", torch_dtype=torch.float16, low_cpu_mem_usage=True
+     ).to(cuda).eval()
+ }
+
+ # processors = {
+ #     "Qwen/Qwen2-VL-2B-Instruct": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)
+ # }
+ processors = {
+     "HumanLlaVA-8B": AutoProcessor.from_pretrained("huangfx1020/human_llama3_8b")
+ }
+ DESCRIPTION = "[HumanLlaVA Demo](https://huggingface.co/huangfx1020/human_llama3_8b)"
+
+ kwargs = {}
+ kwargs['torch_dtype'] = torch.bfloat16  # note: not passed anywhere in this script
+
+
+ # @spaces.GPU
+ def run_example(image, text_input=None, model_id="HumanLlaVA-8B"):
+     image_path = array_to_image_path(image)
+
+     print(image_path)
+     model = models[model_id]
+     processor = processors[model_id]
+     raw_image = Image.open(image_path)
+     # Build the prompt from the user question; depending on the processor's chat
+     # template, the text may also need to contain an "<image>" placeholder token.
+     prompt = text_input
+     inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(cuda, torch.float16)
+
+     # generated_ids = model.generate(**inputs, max_new_tokens=128)
+     # generated_ids_trimmed = [
+     #     out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     # ]
+     # output_text = processor.batch_decode(
+     #     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     # )
+     output = model.generate(**inputs, max_new_tokens=400, do_sample=False)
+     print(output)
+     predict = processor.decode(output[0][:], skip_special_tokens=False)
+     print(predict)
+
+     return predict
+
+ css = """
+ #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Tab(label="HumanLlaVA-8B Input"):
+         with gr.Row():
+             with gr.Column():
+                 input_img = gr.Image(label="Input Picture")
+                 model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="HumanLlaVA-8B")
+                 text_input = gr.Textbox(label="Question")
+                 submit_btn = gr.Button(value="Submit")
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text")
+
+         submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
+
+ demo.queue(api_open=False)
+ demo.launch(debug=True)
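
For quick checks outside the Gradio UI, the same inference path used by run_example can be exercised directly. The sketch below is a minimal example, assuming the weights fit on a single GPU (device 0 here), that a plain question string is accepted by the processor (the model's chat template may additionally expect an "<image>" placeholder), and that "person.jpg" is a placeholder for any local RGB image.

    import torch
    from PIL import Image
    from transformers import AutoProcessor, LlavaForConditionalGeneration

    model_id = "huangfx1020/human_llama3_8b"
    processor = AutoProcessor.from_pretrained(model_id)
    model = LlavaForConditionalGeneration.from_pretrained(
        model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True
    ).to(0).eval()

    # "person.jpg" is a placeholder path; substitute any local RGB image.
    raw_image = Image.open("person.jpg")
    question = "Please describe the person in the image."  # sample question
    inputs = processor(images=raw_image, text=question, return_tensors="pt").to(0, torch.float16)

    # Greedy decoding, mirroring the generate() call in run_example.
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=400, do_sample=False)
    print(processor.decode(output[0], skip_special_tokens=True))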