Update chatbot.py
chatbot.py +43 -42

chatbot.py CHANGED
@@ -158,50 +158,51 @@ client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
 client_mistral_nemo = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
 
 def model_inference(user_prompt, chat_history):
+
     @spaces.GPU(duration=60, queue=False)
     def qwen_inference(user_prompt, chat_history):
-        [42 removed lines: previous qwen_inference body not shown in this view]
+        images = []
+        text_input = user_prompt["text"]
+
+        # Handle multiple image uploads
+        if user_prompt["files"]:
+            images.extend(user_prompt["files"])
+        else:
+            for hist in chat_history:
+                if type(hist[0]) == tuple:
+                    images.extend(hist[0])
+
+        # System Prompt (Similar to LLaVA)
+        SYSTEM_PROMPT = "You are OpenGPT 4o, an exceptionally capable and versatile AI assistant made by KingNish. Your task is to fulfill users query in best possible way. You are provided with image, videos and 3d structures as input with question your task is to give best possible detailed results to user according to their query. Reply the question asked by user properly and best possible way."
+
+        messages = [{"role": "system", "content": SYSTEM_PROMPT}]
+
+        for image in images:
+            if image.endswith(video_extensions):
+                messages.append({
+                    "role": "user",
+                    "content": [
+                        {"type": "video", "video": image},
+                    ]
+                })
+
+            if image.endswith(tuple([i for i, f in image_extensions.items()])):
+                messages.append({
+                    "role": "user",
+                    "content": [
+                        {"type": "image", "image": image},
+                    ]
+                })
+
+        # Add user text input
+        messages.append({
+            "role": "user",
+            "content": [
+                {"type": "text", "text": text_input}
+            ]
+        })
+
+        return messages
 
     if user_prompt["files"]:
         messages = qwen_inference(user_prompt, chat_history)
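For context: the messages list that qwen_inference now builds follows the Qwen2-VL chat format, where each content entry is a {"type": "image" | "video" | "text", ...} part. Below is a minimal sketch of how such a list is typically consumed, assuming the Space pairs this helper with a Qwen2-VL checkpoint and its processor (as documented in the Qwen2-VL model card); the checkpoint id, variable names, and generation settings here are illustrative assumptions, not part of this commit.

# Sketch only: consuming the `messages` structure built by qwen_inference
# with a Qwen2-VL processor. Checkpoint id, `processor`, `model`, and
# generation settings are assumptions for illustration, not from the commit.
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from qwen_vl_utils import process_vision_info  # pip install qwen-vl-utils

model_id = "Qwen/Qwen2-VL-7B-Instruct"  # assumed checkpoint
processor = AutoProcessor.from_pretrained(model_id)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)

# Shape of what qwen_inference returns for one uploaded image plus a question:
messages = [
    {"role": "system", "content": "You are OpenGPT 4o, ..."},
    {"role": "user", "content": [{"type": "image", "image": "photo.jpg"}]},
    {"role": "user", "content": [{"type": "text", "text": "What is in this photo?"}]},
]

# Render the chat template, separate out the vision inputs, and generate.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs,
    padding=True, return_tensors="pt",
).to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=256)
answer = processor.batch_decode(
    output_ids[:, inputs.input_ids.shape[1]:], skip_special_tokens=True
)[0]
print(answer)

The sample messages literal mirrors what qwen_inference returns for a single uploaded image: a system message, one user message per media file, then a final user message carrying the text input. process_vision_info walks those same content parts, which is why the image/video/text dict shapes built in this commit matter.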