Update app.py
change model to v3
app.py
CHANGED
@@ -13,7 +13,7 @@ from transformers import (
 st.set_page_config(page_title="😶🌫️ FuseChat Model")
 
 root_path = "FuseAI"
-model_name = "FuseAI/FuseChat-7B-
+model_name = "FuseAI/FuseChat-Qwen-2.5-7B-Instruct"
 
 @st.cache_resource
 def load_model(model_name):
@@ -41,20 +41,21 @@ def load_model(model_name):
 
 
 with st.sidebar:
-    st.title('😶🌫️ FuseChat-
+    st.title('😶🌫️ FuseChat-3.0')
     st.write('This chatbot is created using FuseChat, a model developed by FuseAI')
-    temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=
-    top_p = st.sidebar.slider('top_p', min_value=0.
-    top_k = st.sidebar.slider('top_k', min_value=1, max_value=1000, value=
-    repetition_penalty = st.sidebar.slider('repetition penalty', min_value=1
-    max_length = st.sidebar.slider('
+    temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=1.0, value=0.7, step=0.01)
+    top_p = st.sidebar.slider('top_p', min_value=0.1, max_value=1.0, value=0.8, step=0.05)
+    top_k = st.sidebar.slider('top_k', min_value=1, max_value=1000, value=20, step=1)
+    repetition_penalty = st.sidebar.slider('repetition penalty', min_value=1.0, max_value=2.0, value=1.05, step=0.05)
+    max_length = st.sidebar.slider('max_length', min_value=32, max_value=4096, value=2048, step=8)
 
 with st.spinner('loading model..'):
     model, tokenizer = load_model(model_name)
 
 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
-    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
+    # st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
+    st.session_state.messages = []
 
 # Display or clear chat messages
 for message in st.session_state.messages:
@@ -64,7 +65,7 @@ for message in st.session_state.messages:
 def set_query(query):
     st.session_state.messages.append({"role": "user", "content": query})
 # Create a list of candidate questions
-candidate_questions = ["
+candidate_questions = ["Is boiling water (100°C) an obtuse angle (larger than 90 degrees)?", "Write a quicksort code in Python.", "笼子里有好几只鸡和兔子。笼子里有72个头,200只腿。里面有多少只鸡和兔子"]
 # Display the chat interface with a list of clickable question buttons
 for question in candidate_questions:
     st.sidebar.button(label=question, on_click=set_query, args=[question])
@@ -75,15 +76,15 @@ st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
 
 
 def generate_fusechat_response():
-
-
+    conversations=[]
+    conversations.append({"role": "system", "content": "You are FuseChat-3.0, created by Sun Yat-sen University. You are a helpful assistant."})
     for dict_message in st.session_state.messages:
         if dict_message["role"] == "user":
-
+            conversations.append({"role": "user", "content": dict_message["content"]})
         else:
-
-
-    input_ids = tokenizer(
+            conversations.append({"role": "assistant", "content": dict_message["content"]})
+    string_dialogue = tokenizer.apply_chat_template(conversations, tokenize=False, add_generation_prompt=True)
+    input_ids = tokenizer(string_dialogue, return_tensors="pt").input_ids.to('cuda')
     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
         {"input_ids": input_ids},
@@ -105,7 +106,7 @@ def generate_fusechat_response():
     return "".join(outputs)
 
 # User-provided prompt
-if prompt := st.chat_input("
+if prompt := st.chat_input("Do androids dream of electric sheep?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)
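
For context on the new prompt path: apply_chat_template renders the conversations list into the model's chat markup before tokenization. Below is a hypothetical illustration of the string it produces, assuming FuseChat-Qwen-2.5-7B-Instruct inherits the ChatML-style template of its Qwen-2.5 base (check tokenizer.chat_template to confirm); the message contents come from the diff above.

# Hypothetical illustration; assumes the Qwen-2.5 ChatML template is kept.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("FuseAI/FuseChat-Qwen-2.5-7B-Instruct")
conversations = [
    {"role": "system", "content": "You are FuseChat-3.0, created by Sun Yat-sen University. You are a helpful assistant."},
    {"role": "user", "content": "Write a quicksort code in Python."},
]
string_dialogue = tokenizer.apply_chat_template(conversations, tokenize=False, add_generation_prompt=True)
# Under a ChatML template, string_dialogue looks roughly like:
# <|im_start|>system
# You are FuseChat-3.0, created by Sun Yat-sen University. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Write a quicksort code in Python.<|im_end|>
# <|im_start|>assistant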
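
The diff also hides lines 90-105 of generate_fusechat_response (the rest of generate_kwargs and the loop that fills outputs). A minimal sketch of how that elided middle typically looks, assuming the standard transformers pattern of running model.generate in a background thread while the main thread drains the TextIteratorStreamer: input_ids, streamer, and the sampling parameters come from the visible lines, while max_new_tokens=max_length and do_sample=True are assumptions.

# Sketch of the elided streaming-generation body; the thread/drain pattern
# and the max_new_tokens/do_sample arguments are assumptions.
from threading import Thread

from transformers import TextIteratorStreamer

def generate_streaming_response(model, tokenizer, input_ids, temperature,
                                top_p, top_k, repetition_penalty, max_length):
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        {"input_ids": input_ids},
        streamer=streamer,
        max_new_tokens=max_length,  # assumption: the max_length slider caps new tokens
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    )
    # model.generate blocks, so it runs in a background thread while the
    # main thread consumes decoded text chunks from the streamer.
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()
    outputs = []
    for text in streamer:
        outputs.append(text)
    return "".join(outputs)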