Update app.py
app.py CHANGED
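This commit reworks the image path of the Space: llava() now receives the chat history as well as the current message, so it can fall back to an image attached in an earlier turn when the new message carries no file; the prompt construction and threaded, streamed LLaVA generation that previously lived inline in respond() move into llava(); and respond() delegates to llava() both when the message has attached files and when the model picks the image_qna function.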
@@ -21,27 +21,32 @@ model = LlavaForConditionalGeneration.from_pretrained(model_id, low_cpu_mem_usag
 model.to("cpu")


-def llava(message):
+def llava(message, history):
     if message["files"]:
         image = user_prompt["files"][0]
+    else:
+        for hist in history:
+            if type(hist[0])==tuple:
+                image = hist[0][0]
+
+    txt = user_prompt["text"]

+    gr.Info("Analyzing image")
+    image = Image.open(image).convert("RGB")
+    prompt = f"<|im_start|>user <image>\n{user_prompt}<|im_end|><|im_start|>assistant"

+    inputs = processor(prompt, image, return_tensors="pt")
+    streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+    generated_text = ""

+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()

+    buffer = ""
+    for new_text in streamer:
+        buffer += new_text
+        yield buffer

 def extract_text_from_webpage(html_content):
     soup = BeautifulSoup(html_content, 'html.parser')
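The new else branch relies on the tuple-style chat history that Gradio chatbots use, where a user turn that carried a file shows up as a tuple whose first element is the file path. A minimal sketch of that assumption, with made-up history values:

# Hypothetical history shape for a tuple-format Gradio chat containing one image turn.
# A user turn that attached a file is stored as a tuple: (file_path, optional caption).
history = [
    (("cat.png",), None),                # user sent an image; no bot text stored for it
    ("What animal is this?", "A cat."),  # ordinary text exchange
]

# The loop added in this commit keeps the last image path found in the history.
image = None
for hist in history:
    if type(hist[0]) == tuple:  # a tuple user turn means a file was attached
        image = hist[0][0]      # first element of the tuple is the file path

print(image)  # -> cat.png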
@@ -92,25 +97,7 @@ def respond(message, history):
     user_prompt = message
     # Handle image processing
     if message["files"]:
-        txt = user_prompt["text"]
-
-        gr.Info("Analyzing image")
-        image = Image.open(image).convert("RGB")
-        prompt = f"<|im_start|>user <image>\n{user_prompt}<|im_end|><|im_start|>assistant"
-
-        inputs = processor(prompt, image, return_tensors="pt")
-        streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-        generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
-        generated_text = ""
-
-        thread = Thread(target=model.generate, kwargs=generation_kwargs)
-        thread.start()
-
-        buffer = ""
-        for new_text in streamer:
-            buffer += new_text
-            yield buffer
+        yield llava(message, history)
     else:
         functions_metadata = [
             {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
@@ -165,6 +152,7 @@ def respond(message, history):
             gr.Info("We are going to Update Our Image Generation Engine to more powerful ones in Next Update. ThankYou")
         elif json_data["name"] == "image_qna":
             messages = f"<|start_header_id|>system\nYou are OpenGPT 4o mini a helpful assistant made by KingNish. You are provide with both images and captions and Your task is to answer of user with help of caption provided. Answer in human style and show emotions.<|end_header_id|>"
+            yield llava(message, history)
         else:
             messages = f"<|start_header_id|>system\nYou are OpenGPT 4o mini a helpful assistant made by KingNish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|end_header_id|>"
         for msg in history:
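For context, the message dict with "text" and "files" keys that both functions index into matches what a multimodal gr.ChatInterface passes to its callback. The wiring below is an assumption about how the Space exposes respond(), not something shown in this diff:

import gradio as gr

# Assumed wiring (not part of this diff): with multimodal=True, ChatInterface sends each
# user turn as {"text": str, "files": [file paths]}, the dict shape used by respond()
# and llava() above.
demo = gr.ChatInterface(respond, multimodal=True, title="OpenGPT 4o mini")
demo.launch()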