Upload folder using huggingface_hub
app_new.py  CHANGED  (+73 -39)
@@ -64,31 +64,48 @@ MODEL_OPTIONS = {
 }
 
 MODEL = None
+CURRENT_MODEL_KEY = None
 
 def get_model_key(model_type, model_size):
     return f"{model_type} {model_size}"
 
-def initialize_model(model_type
-
+def initialize_model(model_type, model_size):
+    """Load the selected model dynamically."""
+    global MODEL, CURRENT_MODEL_KEY
     model_key = get_model_key(model_type, model_size)
+
+    # Only reload the model if it's not already loaded
+    if CURRENT_MODEL_KEY == model_key and MODEL is not None:
+        print(f"Model {model_key} is already loaded.")
+        return
+
+    print(f"Initializing model: {model_key}")
     try:
-        print(f"Initializing model: {model_key}")
         selected_model = MODEL_OPTIONS[model_key]
         MODEL = load_model(
             directory=selected_model["directory"],
             model_name=selected_model["model_name"],
             model_url=selected_model["model_url"]
         )
+        CURRENT_MODEL_KEY = model_key
         print(f"Model initialized: {model_key}")
     except Exception as e:
         print(f"Failed to initialize model {model_key}: {e}")
         MODEL = None
 
-
+# Functions for chat interactions
+def user(message, history, model_type, model_size):
+    """Handle user input and dynamically initialize the selected model."""
+    global MODEL
+
+    # Dynamically initialize the selected model
+    initialize_model(model_type, model_size)
+
     new_history = history + [[message, None]]
     return "", new_history
 
 def bot(history, top_p, top_k, temp):
+    """Generate a response from the bot based on chat history."""
     global MODEL
     if MODEL is None:
         raise RuntimeError("Model has not been initialized. Please select a model to load.")
@@ -96,18 +113,13 @@ def bot(history, top_p, top_k, temp):
     tokens = get_system_tokens(model)[:]
 
     for user_message, bot_message in history[:-1]:
-
-        tokens.extend(message_tokens)
+        tokens.extend(get_message_tokens(model=model, role="user", content=user_message))
         if bot_message:
-
-            tokens.extend(message_tokens)
+            tokens.extend(get_message_tokens(model=model, role="bot", content=bot_message))
 
     last_user_message = history[-1][0]
-
-    tokens.extend(
-
-    role_tokens = model.tokenize("bot\n".encode("utf-8"), special=True)
-    tokens.extend(role_tokens)
+    tokens.extend(get_message_tokens(model=model, role="user", content=last_user_message))
+    tokens.extend(model.tokenize("bot\n".encode("utf-8"), special=True))
 
     generator = model.generate(tokens, top_k=top_k, top_p=top_p, temp=temp)
 
@@ -120,16 +132,22 @@
         history[-1][1] = partial_text
         yield history
 
-
-
-
-
+def clear_chat():
+    """Clear the chat history."""
+    return []
+
+def stop_generation():
+    """Placeholder to stop generation."""
+    print("Generation stopped.")  # Implement stop logic if supported
+    return None
+
+# Gradio UI
+with gr.Blocks(theme=gr.themes.Monochrome(), analytics_enabled=False) as demo:
     favicon = '<img src="https://huggingface.co/FreedomIntelligence/Apollo2-7B/resolve/main/assets/apollo_medium_final.png" width="148px" style="display: inline">'
     gr.Markdown(
         f"""# {favicon} Apollo GGUF Playground
 
 This is a demo of multilingual medical model series **[Apollo](https://huggingface.co/FreedomIntelligence/Apollo-7B-GGUF)**, GGUF version. [Apollo1](https://arxiv.org/abs/2403.03640) covers 6 languages. [Apollo2](https://arxiv.org/abs/2410.10626) covers 50 languages.
-
     """
     )
     with gr.Row():
@@ -142,7 +160,6 @@ with gr.Blocks(
             elem_id="send-message-box"
         )
         with gr.Column(scale=1):
-            # Put model_type and model_size in the same gr.Row
            with gr.Row(equal_height=False):
                model_type = gr.Dropdown(
                    choices=["Apollo", "Apollo2"],
@@ -158,7 +175,6 @@ with gr.Blocks(
                    interactive=True,
                    elem_id="model-size-dropdown",
                )
-               #gr.Markdown("### Generation Parameters")
               top_p = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
@@ -188,16 +204,22 @@ with gr.Blocks(
            stop = gr.Button("Stop", elem_id="stop-btn")
            clear = gr.Button("Clear", elem_id="clear-btn")
 
+    # Event bindings
+    submit_event = msg.submit(
+        fn=user,
+        inputs=[msg, chatbot, model_type, model_size],
+        outputs=[msg, chatbot],
+        queue=False,
+    ).success(
+        fn=bot,
+        inputs=[chatbot, top_p, top_k, temp],
+        outputs=chatbot,
+        queue=True,
+    )
 
-
-    initialize_model(model_type, model_size)
-
-    model_type.change(update_model, [model_type, model_size], None)
-    model_size.change(update_model, [model_type, model_size], None)
-
-    msg.submit(
+    submit_click_event = submit.click(
         fn=user,
-        inputs=[msg, chatbot],
+        inputs=[msg, chatbot, model_type, model_size],
         outputs=[msg, chatbot],
         queue=False,
     ).success(
@@ -207,28 +229,40 @@ with gr.Blocks(
         queue=True,
     )
 
+
+    stop.click(
+        fn=stop_generation,
+        inputs=None,
+        outputs=None,
+        cancels=[submit_event, submit_click_event],
+        queue=False,
+    )
+
+    clear.click(fn=clear_chat, inputs=None, outputs=chatbot, queue=False)
+
 demo.queue(max_size=128)
 demo.css = """
 footer {display: none !important;}
 #send-message-box {width: 100%;}
 #send-btn, #stop-btn, #clear-btn {
-    display: inline-block;
-    width: 30%;
-    margin-right: 2px;
-    text-align: center;
+    display: inline-block;
+    width: 30%;
+    margin-right: 2px;
+    text-align: center;
 }
-
 .gr-row {
-    display: flex !important;
-    flex-direction: row !important;
-    justify-content: space-between;
-    align-items: center;
-    flex-wrap: nowrap;
+    display: flex !important;
+    flex-direction: row !important;
+    justify-content: space-between;
+    align-items: center;
+    flex-wrap: nowrap;
 }
 """
 
+# Initialize
+
 
 # Initialize the default model at startup
-initialize_model("Apollo2", "7B")
+#initialize_model("Apollo2", "7B")
 
 demo.launch(show_error=True, share=True)
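
For context on the main change: initialize_model now keeps the loaded model in module globals and skips reloading when the requested key matches CURRENT_MODEL_KEY, so switching dropdowns only triggers a load when the selection actually changes. Below is a minimal, self-contained sketch of that caching pattern; fake_load is a stand-in for the app's load_model helper (an assumption for illustration, not the real loader).

MODEL = None
CURRENT_MODEL_KEY = None

def fake_load(model_key):
    # Stand-in for an expensive GGUF download/load.
    print(f"(loading {model_key} ...)")
    return object()

def initialize_model(model_type, model_size):
    """Load the selected model only if it differs from the one already in memory."""
    global MODEL, CURRENT_MODEL_KEY
    model_key = f"{model_type} {model_size}"
    if CURRENT_MODEL_KEY == model_key and MODEL is not None:
        print(f"Model {model_key} is already loaded.")
        return
    MODEL = fake_load(model_key)
    CURRENT_MODEL_KEY = model_key

initialize_model("Apollo2", "7B")   # loads
initialize_model("Apollo2", "7B")   # skipped, same key
initialize_model("Apollo", "7B")    # loads the newly selected model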
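The Stop button relies on Gradio event cancellation: a click event can list previously bound events in cancels=[...], and any in-flight run of those events (including a streaming generator like bot) is interrupted. A hedged sketch of that wiring, assuming a recent Gradio 4.x install; slow_echo is an illustrative generator, not the app's bot.

import time
import gradio as gr

def slow_echo(message, history):
    # Stream the reply one character at a time so there is something to cancel.
    text = ""
    for ch in message:
        time.sleep(0.2)
        text += ch
        yield history + [[message, text]]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    stop = gr.Button("Stop")

    submit_event = msg.submit(slow_echo, [msg, chatbot], chatbot)
    # Clicking Stop cancels the in-flight streaming event.
    stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_event])

demo.queue().launch()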