update UI and fix some bugs
ChuanhuChatbot.py  CHANGED  +21 -16
@@ -2,7 +2,7 @@ import gradio as gr
 import openai
 import markdown
 
-my_api_key = "
+my_api_key = "" # input your api_key
 initial_prompt = "You are a helpful assistant."
 
 class ChatGPT:
@@ -19,10 +19,10 @@ class ChatGPT:
         )
         statistics = f'本次对话Tokens用量【{self.response["usage"]["total_tokens"]} / 4096】 ( 提问+上文 {self.response["usage"]["prompt_tokens"]},回答 {self.response["usage"]["completion_tokens"]} )'
         message = self.response["choices"][0]["message"]["content"]
-
+
         message_with_stats = f'{message}\n\n================\n\n{statistics}'
         message_with_stats = markdown.markdown(message_with_stats)
-
+
         return message, message_with_stats
 
     def predict(self, chatbot, input_sentence, ):
@@ -57,22 +57,22 @@ class ChatGPT:
             chatbot = chatbot[:-1]
             self.context = self.context[:-2]
             return chatbot
-
+
     def reduce_token(self, chatbot):
         self.context.append({"role": "user", "content": "请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。在总结中不要加入这一句话。"})
         message, message_with_stats = self.get_response(self.context)
         self.system = {"role": "system", "content": f"You are a helpful assistant. The content that the Assistant and the User discussed in the previous self.context is: {message}."}
-
+
         statistics = f'本次对话Tokens用量【{self.response["usage"]["completion_tokens"]+23} / 4096】'
         optmz_str = markdown.markdown( f"System prompt已经更新, 请继续对话\n\n================\n\n{statistics}" )
         chatbot.append(("请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。", optmz_str))
-
+
         self.context = []
         return chatbot, self.system["content"]
 
 
 def reset_state():
-    return []
+    return []
 
 mychatGPT = ChatGPT(my_api_key)
 
@@ -81,24 +81,29 @@ with gr.Blocks() as demo:
     chatbot = gr.Chatbot().style(color_map=("#1D51EE", "#585A5B"))
     # state = gr.State([])
 
-    with gr.
+    with gr.Row():
+        with gr.Column(scale=10):
             txt = gr.Textbox(show_label=False, placeholder="💬 在这里输入").style(container=False)
+        with gr.Column(min_width=50, scale=1):
+            submitBtn = gr.Button("发送", variant="primary")
     with gr.Row():
-
-
-
-
+        emptyBtn = gr.Button("🧹 新的对话")
+        retryBtn = gr.Button("🔁 重新生成")
+        delLastBtn = gr.Button("⬅️ 删除上条对话")
+        reduceTokenBtn = gr.Button("♻️ 优化Tokens")
 
     system = gr.Textbox(show_label=True, placeholder=f"在这里输入新的System Prompt...", label="更改 System prompt").style(container=True)
     syspromptTxt = gr.Textbox(show_label=True, placeholder=initial_prompt, interactive=False, label="目前的 System prompt").style(container=True)
 
     txt.submit(mychatGPT.predict, [chatbot, txt], [chatbot], show_progress=True)
     txt.submit(lambda :"", None, txt)
-
+    submitBtn.click(mychatGPT.predict, [chatbot, txt], [chatbot], show_progress=True)
+    submitBtn.click(lambda :"", None, txt)
+    emptyBtn.click(reset_state, outputs=[chatbot])
     system.submit(mychatGPT.update_system, system, syspromptTxt)
     system.submit(lambda :"", None, system)
-
-
-
+    retryBtn.click(mychatGPT.retry, [chatbot], [chatbot], show_progress=True)
+    delLastBtn.click(mychatGPT.delete_last_conversation, [chatbot], [chatbot], show_progress=True)
+    reduceTokenBtn.click(mychatGPT.reduce_token, [chatbot], [chatbot, syspromptTxt], show_progress=True)
 
     demo.launch()
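
The new layout above follows the usual Gradio Blocks pattern: declare components inside gr.Row() / gr.Column() containers, then bind events afterwards so that the send button's click and the textbox's Enter key can share the same handler. A minimal, self-contained sketch of that wiring is shown below; the echo handler and its labels are placeholders for illustration, not code from this commit.

import gradio as gr

# Placeholder handler; the real app passes mychatGPT.predict here instead.
def echo(history, user_text):
    # Gradio 3.x Chatbot history is a list of (user, bot) message tuples.
    return history + [(user_text, f"you said: {user_text}")]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=10):
            txt = gr.Textbox(show_label=False, placeholder="type a message")
        with gr.Column(min_width=50, scale=1):
            submitBtn = gr.Button("Send", variant="primary")

    # Enter in the textbox and a click on the button run the same handler;
    # a second event then clears the textbox.
    txt.submit(echo, [chatbot, txt], [chatbot])
    txt.submit(lambda: "", None, txt)
    submitBtn.click(echo, [chatbot, txt], [chatbot])
    submitBtn.click(lambda: "", None, txt)

demo.launch()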
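
The ♻️ 优化Tokens ("optimize tokens") button is wired to reduce_token, which asks the model to summarize the conversation so far, folds that summary into a fresh system prompt, and clears the accumulated context so the chat stays under the 4096-token window. A rough sketch of that idea follows, written against the legacy pre-1.0 openai.ChatCompletion API that the surrounding code appears to target; compress_context, its prompt text, and the model name are illustrative, not taken from the commit.

import openai

# Assumes openai.api_key has already been set elsewhere.
def compress_context(context, model="gpt-3.5-turbo"):
    # Ask the model to condense everything said so far.
    request = context + [{
        "role": "user",
        "content": "Please summarize the conversation above, keeping its "
                   "substance while using as few tokens as possible.",
    }]
    response = openai.ChatCompletion.create(model=model, messages=request)  # legacy API
    summary = response["choices"][0]["message"]["content"]

    # Fold the summary into a new system prompt and drop the old messages;
    # the caller continues the chat from this single message.
    new_system = {
        "role": "system",
        "content": f"You are a helpful assistant. Earlier in this conversation "
                   f"the user and assistant discussed: {summary}",
    }
    return [new_system]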