Spaces:
Sleeping
Sleeping
some test
Browse files
a.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from gradio_client import Client

# Client bound to the hosted "wiklif/mixtral-api" Space.
api_client = Client("wiklif/mixtral-api")

# Send one test question to the /predict endpoint
# (Polish: "How are you feeling today?") and print the reply.
response = api_client.predict(
    name="Jak się dziś masz?",
    api_name="/predict",
)
print(response)
|
app.py
CHANGED
@@ -4,14 +4,16 @@ import gradio as gr
|
|
4 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
5 |
|
6 |
def format_prompt(message, history):
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
def generate(
|
|
|
|
|
15 |
temperature = float(temperature)
|
16 |
if temperature < 1e-2:
|
17 |
temperature = 1e-2
|
@@ -36,24 +38,15 @@ def generate(prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, r
|
|
36 |
yield output
|
37 |
return output
|
38 |
|
39 |
-
# Definicja API Gradio
|
40 |
-
def predict(prompt, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    """Gradio API entry point: run `generate` on a single prompt.

    Each call starts from an empty conversation history; all sampling
    knobs are forwarded to `generate` unchanged.
    """
    # Fresh, empty history for every stateless API call.
    history = []
    # `generate` is defined elsewhere in this module.
    return generate(prompt, history, temperature, max_new_tokens, top_p, repetition_penalty)
|
44 |
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
)
|
57 |
-
|
58 |
-
# Script entry point: launch the Gradio app, listening on all
# network interfaces at port 7860 (the standard HF Spaces port).
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
|
|
|
4 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
5 |
|
6 |
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt from chat history plus the new message.

    Args:
        message: The new user message to append as the final [INST] turn.
        history: Iterable of (user_prompt, bot_response) pairs from prior turns.

    Returns:
        A single prompt string: "<s>" followed by each past turn rendered as
        "[INST] user [/INST] bot</s> ", ending with the new message wrapped
        in "[INST] ... [/INST]".
    """
    # Collect parts and join once instead of repeated `+=` concatenation,
    # which is quadratic in the number of turns.
    parts = ["<s>"]
    for user_prompt, bot_response in history:
        parts.append(f"[INST] {user_prompt} [/INST]")
        parts.append(f" {bot_response}</s> ")
    parts.append(f"[INST] {message} [/INST]")
    return "".join(parts)
|
13 |
+
|
14 |
+
def generate(
|
15 |
+
prompt, history, temperature=0, max_new_tokens=3500, top_p=0.95, repetition_penalty=1.0,
|
16 |
+
):
|
17 |
temperature = float(temperature)
|
18 |
if temperature < 1e-2:
|
19 |
temperature = 1e-2
|
|
|
38 |
yield output
|
39 |
return output
|
40 |
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
+
# Chat widget: custom avatars, copy button, and like/dislike enabled.
mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./botm.png"],
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)

# Wire the streaming `generate` function into a ChatInterface,
# dropping the retry and undo buttons.
demo = gr.ChatInterface(
    fn=generate,
    chatbot=mychatbot,
    title="Tomoniai's Mixtral 8x7b Chat",
    retry_btn=None,
    undo_btn=None,
)

# Queuing is required for streaming responses; expose the HTTP API.
demo.queue().launch(show_api=True)
|
|
|
|
|
|
|
|