pechaut committed on
Commit dffa7a8
1 Parent(s): e347403

Update app.py

Files changed (1)
  1. app.py +122 -2
app.py CHANGED
@@ -1,6 +1,126 @@
 import gradio as gr
 
 
-demo = gr.load("StarkWizard/Mistral-7b-instruct-cairo-instruct-AWQ", src="models")
 
-demo.launch()
+from ctransformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer
+from transformers import TextIteratorStreamer
+import torch
 import gradio as gr
+from threading import Thread
 
 
+hub_name = "StarkWizard/Mistral-7b-instruct-cairo-instruct-GGUF"
+model_file = "Mistral-7b-instruct-cairo-instruct.Q4_k.gguf"
+DEVICE, hw, layers = ("cpu", True, 0) if torch.cuda.is_available() else ("cpu", False, 0)
+
+
+
+
+
+print("loading LLM")
+# Load model
+config = AutoConfig.from_pretrained("TheBloke/Mistral-7B-v0.1-GGUF")
+
+config.max_seq_len = 4096
+config.max_answer_len = 1024
+
+model = AutoModelForCausalLM.from_pretrained(hub_name, model_file=model_file, model_type="mistral", gpu_layers=layers,
+                                             config=config,
+                                             compress_pos_emb=2,
+                                             top_k=4000,
+                                             top_p=0.99,
+                                             temperature=0.0001,
+                                             do_sample=True,
+
+
+                                             )
+
+
+def fmt_history(history) -> str:
+
+    return "\n".join(["User: \"{usr_query}\", Assistant: \"{your_resp}\"".format(
+        usr_query=usr_query.replace("\n", ""), your_resp=your_resp.replace("\n", ""))
+                      for usr_query, your_resp in history])
+
+
+
+def run_generation(user_text, top_p, temperature, top_k, max_new_tokens):
+
+    text = f"""
+    [INST]
+    <<SYS>>
+    A student asks you a question about Cairo 1. Provide a concise answer to the student's question; do not expand on the subject of the question, and do not introduce any new topics or questions not raised by the student.
+    Make sure the explanations are never longer than 300 words. Don't justify your answers. Don't give information not mentioned in the CONTEXT INFORMATION. Provide only one solution. <</SYS>>
+
+    Question: I'm working in Cairo 1: {user_text}
+    [/INST]
+    """
+
+    model_output = ""
+    for chunk in model(text, stream=True, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, temperature=temperature):
+        model_output += chunk
+        yield model_output
+
+
+    return model_output
+
+
+
+def reset_textbox():
+    return gr.update(value='')
+
+with gr.Blocks() as demo:
+    duplicate_link = "https://huggingface.co/spaces/joaogante/transformers_streaming?duplicate=true"
+    gr.Markdown(
+        "# 🔥 Mistral Cairo 🔥\n"
+        f"[{hub_name}](https://huggingface.co/{hub_name})\n\n"
+
+    )
+
+    with gr.Row():
+        with gr.Column(scale=4):
+            # user_text = gr.Textbox(
+            #     placeholder="Write an email about an alpaca that likes flan",
+            #     label="User input"
+            # )
+            # model_output = gr.Markdown(label="Model output", lines=10, interactive=False)
+            # button_submit = gr.Button(value="Submit")
+
+            chatbot = gr.Chatbot()
+            msg = gr.Textbox()
+            clear = gr.Button("Clear")
+
+            def user(user_message, history):
+                return "", history + [[user_message, None]]
+            def respond(history):
+                message = history[-1][0]
+                print(f"User: {message}")
+                print(f"top_p {top_p.value}, temperature {temperature.value}, top_k {top_k.value}, max_new_tokens {max_new_tokens.value}")
+                bot_message = run_generation(message, top_p.value, temperature.value, top_k.value, max_new_tokens.value)
+                for partial in bot_message:
+                    history[-1][1] = partial
+                    yield history
+
+
+            msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(respond, chatbot, chatbot)
+            clear.click(lambda: None, None, chatbot, queue=False)
+
+        with gr.Column(scale=1):
+            max_new_tokens = gr.Slider(
+                minimum=1, maximum=2000, value=2000, step=1, interactive=True, label="Max New Tokens",
+            )
+            top_p = gr.Slider(
+                minimum=0.05, maximum=1.0, value=0.99, step=0.05, interactive=True, label="Top-p (nucleus sampling)",
+            )
+            top_k = gr.Slider(
+                minimum=40, maximum=5000, value=4000, step=10, interactive=True, label="Top-k",
+            )
+            temperature = gr.Slider(
+                minimum=0.0001, maximum=0.4, value=0.0001, step=0.1, interactive=True, label="Temperature",
+            )
+
+    # user_text.submit(run_generation, [user_text, top_p, temperature, top_k, max_new_tokens], model_output)
+    # button_submit.click(run_generation, [user_text, top_p, temperature, top_k, max_new_tokens], model_output)
+
+    # demo.queue(max_size=32).launch(enable_queue=True)
+
+demo.queue()
+demo.launch()
+
+
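
For reference, a minimal sketch of how the streaming path added in this commit can be exercised outside the Gradio UI. The repo name, model file, and sampling defaults are copied from app.py above; the example question is hypothetical, and this assumes the GGUF download and a CPU-only ctransformers install succeed.

# Smoke test for the ctransformers streaming generation used by run_generation.
# Assumption: dependencies installed via `pip install ctransformers gradio torch`.
from ctransformers import AutoModelForCausalLM

hub_name = "StarkWizard/Mistral-7b-instruct-cairo-instruct-GGUF"
model_file = "Mistral-7b-instruct-cairo-instruct.Q4_k.gguf"

# CPU-only load, matching gpu_layers=0 in app.py.
model = AutoModelForCausalLM.from_pretrained(
    hub_name, model_file=model_file, model_type="mistral", gpu_layers=0
)

# Hypothetical student question wrapped in the same [INST] format as app.py.
prompt = "[INST] Question: I'm working in Cairo 1: how do I declare a constant? [/INST]"

# model(..., stream=True) yields text fragments; accumulate them as run_generation does.
output = ""
for fragment in model(prompt, stream=True, max_new_tokens=256,
                      temperature=0.0001, top_p=0.99, top_k=4000):
    output += fragment
    print(fragment, end="", flush=True)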