KingNish committed on
Commit: c2d1d44
Parent: 4936b31

Update chatbot.py

Files changed (1)
  1. chatbot.py +43 -42
chatbot.py CHANGED
@@ -158,50 +158,51 @@ client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
 client_mistral_nemo = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
 
 def model_inference(user_prompt, chat_history):
+
     @spaces.GPU(duration=60, queue=False)
     def qwen_inference(user_prompt, chat_history):
-        images = []
-        text_input = user_prompt["text"]
-
-        # Handle multiple image uploads
-        if user_prompt["files"]:
-            images.extend(user_prompt["files"])
-        else:
-            for hist in chat_history:
-                if type(hist[0]) == tuple:
-                    images.extend(hist[0])
-
-        # System Prompt (Similar to LLaVA)
-        SYSTEM_PROMPT = "You are OpenGPT 4o, an exceptionally capable and versatile AI assistant made by KingNish. Your task is to fulfill users query in best possible way. You are provided with image, videos and 3d structures as input with question your task is to give best possible detailed results to user according to their query. Reply the question asked by user properly and best possible way."
-
-        messages = [{"role": "system", "content": SYSTEM_PROMPT}]
-
-        for image in images:
-            if image.endswith(video_extensions):
-                messages.append({
-                    "role": "user",
-                    "content": [
-                        {"type": "video", "video": image},
-                    ]
-                })
-
-            if image.endswith(tuple([i for i, f in image_extensions.items()])):
-                messages.append({
-                    "role": "user",
-                    "content": [
-                        {"type": "image", "image": image},
-                    ]
-                })
-
-        # Add user text input
-        messages.append({
-            "role": "user",
-            "content": [
-                {"type": "text", "text": text_input}
-            ]
-        })
-
-        return messages
+        images = []
+        text_input = user_prompt["text"]
+
+        # Handle multiple image uploads
+        if user_prompt["files"]:
+            images.extend(user_prompt["files"])
+        else:
+            for hist in chat_history:
+                if type(hist[0]) == tuple:
+                    images.extend(hist[0])
+
+        # System Prompt (Similar to LLaVA)
+        SYSTEM_PROMPT = "You are OpenGPT 4o, an exceptionally capable and versatile AI assistant made by KingNish. Your task is to fulfill users query in best possible way. You are provided with image, videos and 3d structures as input with question your task is to give best possible detailed results to user according to their query. Reply the question asked by user properly and best possible way."
+
+        messages = [{"role": "system", "content": SYSTEM_PROMPT}]
+
+        for image in images:
+            if image.endswith(video_extensions):
+                messages.append({
+                    "role": "user",
+                    "content": [
+                        {"type": "video", "video": image},
+                    ]
+                })
+
+            if image.endswith(tuple([i for i, f in image_extensions.items()])):
+                messages.append({
+                    "role": "user",
+                    "content": [
+                        {"type": "image", "image": image},
+                    ]
+                })
+
+        # Add user text input
+        messages.append({
+            "role": "user",
+            "content": [
+                {"type": "text", "text": text_input}
+            ]
+        })
+
+        return messages
 
     if user_prompt["files"]:
         messages = qwen_inference(user_prompt, chat_history)
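
For readers following the hunk: qwen_inference only builds the chat-format messages list (a system prompt, one user entry per image or video, then the user text). How that list is consumed is not shown here. Below is a minimal sketch of the usual Qwen2-VL consumption path with transformers and qwen_vl_utils; the checkpoint name, the run_qwen helper, and the generation settings are illustrative assumptions and do not appear in this diff.

# Illustrative only: this hunk does not show how `messages` is consumed.
# Assumes a Qwen2-VL checkpoint is loaded elsewhere in chatbot.py.
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from qwen_vl_utils import process_vision_info

MODEL_ID = "Qwen/Qwen2-VL-7B-Instruct"  # hypothetical checkpoint name
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.float16, device_map="auto"
)

def run_qwen(messages):
    # Render the chat template and extract image/video inputs from `messages`.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to(model.device)
    # Generate, then trim the prompt tokens before decoding the reply.
    output_ids = model.generate(**inputs, max_new_tokens=512)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
    return processor.batch_decode(trimmed, skip_special_tokens=True)[0]

The actual wiring in chatbot.py may differ, for example by streaming tokens back to the Gradio UI instead of returning a single decoded string.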