Spaces:
Sleeping
Sleeping
shorter prompt, removed history
Browse files
a.py
CHANGED
@@ -2,7 +2,7 @@ from gradio_client import Client
|
|
2 |
|
3 |
client = Client("wiklif/mixtral-api")
|
4 |
result = client.predict(
|
5 |
-
prompt="Jak masz na
|
6 |
api_name="/chat"
|
7 |
)
|
8 |
print(result)
|
|
|
2 |
|
3 |
client = Client("wiklif/mixtral-api")
|
4 |
result = client.predict(
|
5 |
+
prompt="Jak masz na imię?",
|
6 |
api_name="/chat"
|
7 |
)
|
8 |
print(result)
|
app.py
CHANGED
@@ -12,14 +12,11 @@ def generate_response(chat, kwargs):
|
|
12 |
output += response.token.text
|
13 |
return output
|
14 |
|
15 |
-
def function(prompt, history):
|
16 |
-
chat = "<s>"
|
17 |
-
for user_prompt, bot_response in history:
|
18 |
-
chat += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
|
19 |
-
chat += f"[INST] {prompt} [/INST]"
|
20 |
kwargs = dict(
|
21 |
-
temperature=0.
|
22 |
-
max_new_tokens=
|
23 |
top_p=0.95,
|
24 |
repetition_penalty=1.0,
|
25 |
do_sample=True, # Upewnij się, że używasz próbkowania
|
@@ -28,9 +25,9 @@ def function(prompt, history):
|
|
28 |
|
29 |
try:
|
30 |
output = generate_response(chat, kwargs)
|
31 |
-
|
32 |
except:
|
33 |
-
|
34 |
|
35 |
interface = gr.ChatInterface(
|
36 |
fn=function,
|
|
|
12 |
output += response.token.text
|
13 |
return output
|
14 |
|
15 |
+
def function(prompt):
|
16 |
+
chat = f"<s>[INST] {prompt} [/INST]</s>"
|
|
|
|
|
|
|
17 |
kwargs = dict(
|
18 |
+
temperature=0.5,
|
19 |
+
max_new_tokens=4096,
|
20 |
top_p=0.95,
|
21 |
repetition_penalty=1.0,
|
22 |
do_sample=True, # Upewnij się, że używasz próbkowania
|
|
|
25 |
|
26 |
try:
|
27 |
output = generate_response(chat, kwargs)
|
28 |
+
return output
|
29 |
except:
|
30 |
+
return ''
|
31 |
|
32 |
interface = gr.ChatInterface(
|
33 |
fn=function,
|