Spaces:
Sleeping
Sleeping
Tuchuanhuhuhu
committed on
Commit
•
73d933e
1
Parent(s):
90443d4
改进了总结对话闪烁的问题
Browse files
- ChuanhuChatbot.py +3 -3
- modules/chat_func.py +3 -1
ChuanhuChatbot.py
CHANGED
@@ -261,7 +261,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
261 |
transfer_input_args = dict(
|
262 |
fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True
|
263 |
)
|
264 |
-
|
265 |
get_usage_args = dict(
|
266 |
fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False
|
267 |
)
|
@@ -318,7 +318,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
318 |
token_count,
|
319 |
top_p,
|
320 |
temperature,
|
321 |
-
gr.State(
|
322 |
model_select_dropdown,
|
323 |
language_select_dropdown,
|
324 |
],
|
@@ -326,7 +326,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
326 |
show_progress=True,
|
327 |
)
|
328 |
reduceTokenBtn.click(**get_usage_args)
|
329 |
-
|
330 |
# ChatGPT
|
331 |
keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args)
|
332 |
|
|
|
261 |
transfer_input_args = dict(
|
262 |
fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True
|
263 |
)
|
264 |
+
|
265 |
get_usage_args = dict(
|
266 |
fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False
|
267 |
)
|
|
|
318 |
token_count,
|
319 |
top_p,
|
320 |
temperature,
|
321 |
+
gr.State(max_token_streaming if use_streaming_checkbox.value else max_token_all),
|
322 |
model_select_dropdown,
|
323 |
language_select_dropdown,
|
324 |
],
|
|
|
326 |
show_progress=True,
|
327 |
)
|
328 |
reduceTokenBtn.click(**get_usage_args)
|
329 |
+
|
330 |
# ChatGPT
|
331 |
keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args)
|
332 |
|
modules/chat_func.py
CHANGED
@@ -268,7 +268,8 @@ def predict(
|
|
268 |
should_check_token_count=True,
|
269 |
): # repetition_penalty, top_k
|
270 |
logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
|
271 |
-
|
|
|
272 |
if reply_language == "跟随问题语言(不稳定)":
|
273 |
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
|
274 |
if files:
|
@@ -460,6 +461,7 @@ def reduce_token_size(
|
|
460 |
flag = False
|
461 |
for chatbot, history, status_text, previous_token_count in iter:
|
462 |
num_chat = find_n(previous_token_count, max_token_count)
|
|
|
463 |
if flag:
|
464 |
chatbot = chatbot[:-1]
|
465 |
flag = True
|
|
|
268 |
should_check_token_count=True,
|
269 |
): # repetition_penalty, top_k
|
270 |
logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
|
271 |
+
if should_check_token_count:
|
272 |
+
yield chatbot+[(inputs, "")], history, "开始生成回答……", all_token_counts
|
273 |
if reply_language == "跟随问题语言(不稳定)":
|
274 |
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
|
275 |
if files:
|
|
|
461 |
flag = False
|
462 |
for chatbot, history, status_text, previous_token_count in iter:
|
463 |
num_chat = find_n(previous_token_count, max_token_count)
|
464 |
+
logging.info(f"previous_token_count: {previous_token_count}, keeping {num_chat} chats")
|
465 |
if flag:
|
466 |
chatbot = chatbot[:-1]
|
467 |
flag = True
|