Tuchuanhuhuhu committed on
Commit 4333f18
1 Parent(s): 022b9a0

Significantly improve code quality (大幅度改进代码质量)

Files changed (3):
  1. ChuanhuChatbot.py +31 -21
  2. presets.py +10 -0
  3. utils.py +125 -111
ChuanhuChatbot.py CHANGED
@@ -8,9 +8,6 @@ from presets import *
 
 
 my_api_key = "" # 在这里输入你的 API 密钥
-HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
-
-gr.Chatbot.postprocess = postprocess
 
 #if we are running in Docker
 if os.environ.get('dockerrun') == 'yes':
@@ -42,12 +39,17 @@ else:
     if username != "" and password != "":
         authflag = True
 
+gr.Chatbot.postprocess = postprocess
+
 with gr.Blocks(css=customCSS) as demo:
     gr.HTML(title)
-    keyTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入你的OpenAI API-key...",
-                        value=my_api_key, label="API Key", type="password", visible=not HIDE_MY_KEY).style(container=True)
+    with gr.Row():
+        keyTxt = gr.Textbox(show_label=False, placeholder=f"在这里输入你的OpenAI API-key...",
+                            value=my_api_key, type="password", visible=not HIDE_MY_KEY).style(container=True)
+        use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
     chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B"))
     history = gr.State([])
+    token_count = gr.State([])
     promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
     TRUECOMSTANT = gr.State(True)
     FALSECONSTANT = gr.State(False)
@@ -55,16 +57,16 @@ with gr.Blocks(css=customCSS) as demo:
 
     with gr.Row():
         with gr.Column(scale=12):
-            txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(
+            user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style(
                 container=False)
         with gr.Column(min_width=50, scale=1):
             submitBtn = gr.Button("🚀", variant="primary")
     with gr.Row():
         emptyBtn = gr.Button("🧹 新的对话")
         retryBtn = gr.Button("🔄 重新生成")
-        delLastBtn = gr.Button("🗑️ 删除上条对话")
+        delLastBtn = gr.Button("🗑️ 删除最近一条对话")
         reduceTokenBtn = gr.Button("♻️ 总结对话")
-    statusDisplay = gr.Markdown("status: ready")
+    status_display = gr.Markdown("status: ready")
     systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...",
                                  label="System prompt", value=initial_prompt).style(container=True)
     with gr.Accordion(label="加载Prompt模板", open=False):
@@ -105,26 +107,34 @@
     gr.Markdown(description)
 
 
-    txt.submit(predict, [txt, top_p, temperature, keyTxt,
-               chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
-    txt.submit(reset_textbox, [], [txt])
-    submitBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot,
-                    history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
-    submitBtn.click(reset_textbox, [], [txt])
-    emptyBtn.click(reset_state, outputs=[chatbot, history])
-    retryBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
-                   systemPromptTxt, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
-    delLastBtn.click(delete_last_conversation, [chatbot, history], [
-        chatbot, history], show_progress=True)
-    reduceTokenBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
-                         systemPromptTxt, FALSECONSTANT, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
+    user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+    user_input.submit(reset_textbox, [], [user_input])
+
+    submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+    submitBtn.click(reset_textbox, [], [user_input])
+
+    emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True)
+
+    retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+
+    delLastBtn.click(delete_last_conversation, [chatbot, history, token_count, use_streaming_checkbox], [
+        chatbot, history, token_count, status_display], show_progress=True)
+
+    reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
+
     saveHistoryBtn.click(save_chat_history, [
         saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
+
     saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])
+
     historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
+
     historyReadBtn.click(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
+
     templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
+
     templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
+
     templateApplyBtn.click(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)
 
 print("川虎的温馨提示:访问 http://localhost:7860 查看界面")
presets.py CHANGED
@@ -29,3 +29,13 @@ pre code {
     box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
 }
 """
+
+standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀
+error_retrieve_prompt = "连接超时,无法获取对话。请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误
+summarize_prompt = "请总结以上对话,不超过100字。" # 总结对话时的 prompt
+max_token_streaming = 3000 # 流式对话时的最大 token 数
+timeout_streaming = 5 # 流式对话时的超时时间
+max_token_all = 3500 # 非流式对话时的最大 token 数
+timeout_all = 200 # 非流式对话时的超时时间
+enable_streaming_option = False # 是否启用选择选择是否实时显示回答的勾选框
+HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
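These constants are consumed in utils.py below, which picks a timeout and a token ceiling depending on whether streaming is enabled. A minimal sketch of that pairing; limits_for is a hypothetical helper for illustration, not a function in the repo:

from presets import (max_token_all, max_token_streaming,
                     timeout_all, timeout_streaming)

def limits_for(stream: bool):
    """Return (request timeout in seconds, token ceiling) for the chosen mode."""
    if stream:
        return timeout_streaming, max_token_streaming
    return timeout_all, max_token_all

print(limits_for(True))   # (5, 3000)
print(limits_for(False))  # (200, 3500)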
utils.py CHANGED
@@ -14,6 +14,7 @@ import requests
 import csv
 import mdtex2html
 from pypinyin import lazy_pinyin
+from presets import *
 
 if TYPE_CHECKING:
     from typing import TypedDict
@@ -51,7 +52,6 @@ def parse_text(text):
     lines = text.split("\n")
     lines = [line for line in lines if line != ""]
     count = 0
-    firstline = False
     for i, line in enumerate(lines):
         if "```" in line:
             count += 1
@@ -79,54 +79,33 @@ def parse_text(text):
     text = "".join(lines)
     return text
 
-def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[], system_prompt=initial_prompt, retry=False, summary=False, retry_on_crash = False, stream = True): # repetition_penalty, top_k
+def construct_text(role, text):
+    return {"role": role, "content": text}
 
-    if retry_on_crash:
-        retry = True
+def construct_user(text):
+    return construct_text("user", text)
 
+def construct_system(text):
+    return construct_text("system", text)
+
+def construct_assistant(text):
+    return construct_text("assistant", text)
+
+def construct_token_message(token, stream=False):
+    extra = "【仅包含回答的计数】 " if stream else ""
+    return f"{extra}Token 计数: {token}"
+
+def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {openai_api_key}"
     }
 
-    chat_counter = len(history) // 2
-
-    print(f"chat_counter - {chat_counter}")
-
-    messages = []
-    if chat_counter:
-        for index in range(0, 2*chat_counter, 2):
-            temp1 = {}
-            temp1["role"] = "user"
-            temp1["content"] = history[index]
-            temp2 = {}
-            temp2["role"] = "assistant"
-            temp2["content"] = history[index+1]
-            if temp1["content"] != "":
-                if temp2["content"] != "" or retry:
-                    messages.append(temp1)
-                    messages.append(temp2)
-            else:
-                messages[-1]['content'] = temp2['content']
-    if retry and chat_counter:
-        if retry_on_crash:
-            messages = messages[-6:]
-        messages.pop()
-    elif summary:
-        history = [*[i["content"] for i in messages[-2:]], "我们刚刚聊了什么?"]
-        messages.append(compose_user(
-            "请帮我总结一下上述对话的内容,实现减少字数的同时,保证对话的质量。在总结中不要加入这一句话。"))
-    else:
-        temp3 = {}
-        temp3["role"] = "user"
-        temp3["content"] = inputs
-        messages.append(temp3)
-        chat_counter += 1
-    messages = [compose_system(system_prompt), *messages]
-    # messages
+    history = [construct_system(system_prompt), *history]
+
     payload = {
         "model": "gpt-3.5-turbo",
-        "messages": messages, # [{"role": "user", "content": f"{inputs}"}],
+        "messages": history, # [{"role": "user", "content": f"{inputs}"}],
         "temperature": temperature, # 1.0,
        "top_p": top_p, # 1.0,
        "n": 1,
@@ -134,94 +113,129 @@ def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[],
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
-
-    if not summary:
-        history.append(inputs)
-    else:
-        print("精简中...")
-
-    print(f"payload: {payload}")
-    # make a POST request to the API endpoint using the requests.post method, passing in stream=True
-    try:
-        response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-    except:
-        history.append("")
-        chatbot.append((inputs, ""))
-        yield history, chatbot, f"获取请求失败,请检查网络连接。"
-        return
-
-    token_counter = 0
-    partial_words = ""
-
-    counter = 0
-    if stream:
-        chatbot.append((parse_text(history[-1]), ""))
-        for chunk in response.iter_lines():
-            if counter == 0:
-                counter += 1
-                continue
-            counter += 1
-            # check whether each line is non-empty
-            if chunk:
-                # decode each line as response data is in bytes
-                try:
-                    if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
-                        chunkjson = json.loads(chunk.decode()[6:])
-                        status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}"
-                        yield chatbot, history, status_text
-                        break
-                except Exception as e:
-                    if not retry_on_crash:
-                        print("正在尝试使用缩短的context重新生成……")
-                        chatbot.pop()
-                        history.append("")
-                        yield next(predict(inputs, top_p, temperature, openai_api_key, chatbot, history, system_prompt, retry, summary=False, retry_on_crash=True, stream=False))
-                    else:
-                        msg = "☹️发生了错误:生成失败,请检查网络"
-                        print(msg)
-                        history.append(inputs, "")
-                        chatbot.append(inputs, msg)
-                        yield chatbot, history, "status: ERROR"
-                    break
-                chunkjson = json.loads(chunk.decode()[6:])
-                status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}"
-                partial_words = partial_words + \
-                    json.loads(chunk.decode()[6:])[
-                        'choices'][0]["delta"]["content"]
-                if token_counter == 0:
-                    history.append(" " + partial_words)
-                else:
-                    history[-1] = partial_words
-                chatbot[-1] = (parse_text(history[-2]), parse_text(history[-1]))
-                token_counter += 1
-                yield chatbot, history, status_text
-    else:
-        try:
-            responsejson = json.loads(response.text)
-            content = responsejson["choices"][0]["message"]["content"]
-            history.append(content)
-            chatbot.append((parse_text(history[-2]), parse_text(content)))
-            status_text = "精简完成"
-        except:
-            chatbot.append((parse_text(history[-1]), "☹️发生了错误,请检查网络连接或者稍后再试。"))
-            status_text = "status: ERROR"
-        yield chatbot, history, status_text
-
-
-
-def delete_last_conversation(chatbot, history):
-    try:
-        if "☹️发生了错误" in chatbot[-1][1]:
-            chatbot.pop()
-            print(history)
-            return chatbot, history
-        history.pop()
-        history.pop()
-        chatbot.pop()
-        print(history)
-        return chatbot, history
-    except:
-        return chatbot, history
+    if stream:
+        timeout = timeout_streaming
+    else:
+        timeout = timeout_all
+    response = requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=timeout)
+    return response
+
+def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
+    def get_return_value():
+        return chatbot, history, status_text, [*previous_token_count, token_counter]
+    token_counter = 0
+    partial_words = ""
+    counter = 0
+    status_text = "OK"
+    history.append(construct_user(inputs))
+    try:
+        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True)
+    except requests.exceptions.ConnectTimeout:
+        status_text = standard_error_msg + error_retrieve_prompt
+        yield get_return_value()
+        return
+
+    chatbot.append((parse_text(inputs), ""))
+    yield get_return_value()
+
+    for chunk in response.iter_lines():
+        if counter == 0:
+            counter += 1
+            continue
+        counter += 1
+        # check whether each line is non-empty
+        if chunk:
+            chunk = chunk.decode()
+            chunklength = len(chunk)
+            chunk = json.loads(chunk[6:])
+            # decode each line as response data is in bytes
+            if chunklength > 6 and "delta" in chunk['choices'][0]:
+                finish_reason = chunk['choices'][0]['finish_reason']
+                status_text = construct_token_message(sum(previous_token_count)+token_counter, stream=True)
+                if finish_reason == "stop":
+                    yield get_return_value()
+                    break
+                partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
+                if token_counter == 0:
+                    history.append(construct_assistant(" " + partial_words))
+                else:
+                    history[-1] = construct_assistant(partial_words)
+                chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
+                token_counter += 1
+                yield get_return_value()
+
+
+def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
+    history.append(construct_user(inputs))
+    try:
+        response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False)
+    except requests.exceptions.ConnectTimeout:
+        status_text = standard_error_msg + error_retrieve_prompt
+        return chatbot, history, status_text, previous_token_count
+    response = json.loads(response.text)
+    content = response["choices"][0]["message"]["content"]
+    history.append(construct_assistant(content))
+    chatbot.append((parse_text(inputs), parse_text(content)))
+    total_token_count = response["usage"]["total_tokens"]
+    previous_token_count.append(total_token_count - sum(previous_token_count))
+    status_text = construct_token_message(total_token_count)
+    return chatbot, history, status_text, previous_token_count
+
+
+def predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=False, should_check_token_count = True): # repetition_penalty, top_k
+    if stream:
+        iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
+        for chatbot, history, status_text, token_count in iter:
+            yield chatbot, history, status_text, token_count
+    else:
+        chatbot, history, status_text, token_count = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
+        yield chatbot, history, status_text, token_count
+    if stream:
+        max_token = max_token_streaming
+    else:
+        max_token = max_token_all
+    if sum(token_count) > max_token and should_check_token_count:
+        iter = reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=True)
+        for chatbot, history, status_text, token_count in iter:
+            status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
+            yield chatbot, history, status_text, token_count
+
+
+def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False):
+    if len(history) == 0:
+        yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
+        return
+    history.pop()
+    inputs = history.pop()["content"]
+    token_count.pop()
+    iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream)
+    for x in iter:
+        yield x
+
+
+def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False):
+    iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, should_check_token_count=False)
+    for chatbot, history, status_text, previous_token_count in iter:
+        history = history[-2:]
+        token_count = previous_token_count[-1:]
+        if hidden:
+            chatbot.pop()
+        yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count
+
+
+def delete_last_conversation(chatbot, history, previous_token_count, streaming):
+    if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
+        chatbot.pop()
+        return chatbot, history
+    if len(history) > 0:
+        history.pop()
+        history.pop()
+    if len(chatbot) > 0:
+        chatbot.pop()
+    if len(previous_token_count) > 0:
+        previous_token_count.pop()
+    return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count), streaming)
+
 
 def save_chat_history(filename, system, history, chatbot):
     if filename == "":
@@ -298,7 +312,7 @@ def get_template_content(templates, selection, original_system_prompt):
     return original_system_prompt
 
 def reset_state():
-    return [], []
+    return [], [], [], construct_token_message(0)
 
 def compose_system(system_prompt):
     return {"role": "system", "content": system_prompt}