Update app.py
app.py CHANGED
@@ -83,9 +83,7 @@ def search(query):
 client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
 client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
 client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
-
-
-func_caller = []
+client_yi = InferenceClient("01-ai/Yi-1.5-34B-Chat")
 
 # Define the main chat function
 def respond(message, history):
@@ -176,15 +174,15 @@ def respond(message, history):
                 buffer += new_text
                 yield buffer
         else:
-            messages = f"<|
+            messages = f"<|im_start|>system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|im_end|>"
             for msg in history:
-                messages += f"\n<|
-                messages += f"\n<|
-            messages+=f"\n<|
-            stream =
+                messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
+                messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
+            messages+=f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>assistant\n"
+            stream = client_yi.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
             output = ""
             for response in stream:
-                if not response.token.text == "<|
+                if not response.token.text == "<|endoftext|>":
                     output += response.token.text
                     yield output
     except:
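
For reference, the new code path is easy to reproduce outside the app. The sketch below is a minimal, self-contained version of what this commit wires up: it builds a ChatML-style prompt (the <|im_start|>/<|im_end|> format Yi-1.5 expects), streams tokens from the model through huggingface_hub's InferenceClient.text_generation, and filters out the <|endoftext|> marker. The system prompt text and the stream_reply wrapper here are illustrative, not part of the commit; history is assumed to be a list of (user, assistant) string pairs, as Gradio chat apps pass it.

# Minimal sketch (not part of the commit): reproduce the new Yi streaming path.
from huggingface_hub import InferenceClient

client_yi = InferenceClient("01-ai/Yi-1.5-34B-Chat")

def stream_reply(message_text, history):
    # history is assumed to be a list of (user, assistant) string pairs.
    # Build a ChatML prompt: one system turn, the prior turns, then the new user turn.
    messages = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>"
    for user_msg, bot_msg in history:
        messages += f"\n<|im_start|>user\n{user_msg}<|im_end|>"
        messages += f"\n<|im_start|>assistant\n{bot_msg}<|im_end|>"
    messages += f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>assistant\n"

    # details=True + stream=True yields objects whose .token.text is the new token;
    # return_full_text=False keeps the prompt out of the generated output.
    stream = client_yi.text_generation(
        messages,
        max_new_tokens=2000,
        do_sample=True,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        # Drop the end-of-text marker so it never reaches the UI.
        if response.token.text != "<|endoftext|>":
            output += response.token.text
            yield output

Each yielded value is the full reply accumulated so far, which matches the incremental-update convention that Gradio chat interfaces expect from a streaming generator.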