Tuchuanhuhuhu committed
Commit 86018c8
1 Parent(s): 60d042c

Add a slider for the context length limit

Files changed (3)
  1. ChuanhuChatbot.py +10 -1
  2. modules/base_model.py +4 -0
  3. modules/models.py +3 -0
ChuanhuChatbot.py CHANGED
```diff
@@ -189,13 +189,21 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
             value="",
             lines=1,
         )
+        max_context_length_slider = gr.Slider(
+            minimum=1,
+            maximum=32768,
+            value=2000,
+            step=1,
+            interactive=True,
+            label="max context",
+        )
         max_tokens_slider = gr.Slider(
             minimum=1,
             maximum=32768,
             value=1000,
             step=1,
             interactive=True,
-            label="max tokens",
+            label="max generations",
         )
         presence_penalty_slider = gr.Slider(
             minimum=-2.0,
@@ -379,6 +387,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
     downloadFile.change(**load_history_from_file_args)
 
     # Advanced
+    max_context_length_slider.change(current_model.value.set_token_upper_limit, [max_context_length_slider], None)
     temperature_slider.change(current_model.value.set_temperature, [temperature_slider], None)
     top_p_slider.change(current_model.value.set_top_p, [top_p_slider], None)
     n_choices_slider.change(current_model.value.set_n_choices, [n_choices_slider], None)
```
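In short, this hunk adds a `max_context_length_slider` alongside the existing `max_tokens_slider` (relabeled from "max tokens" to "max generations" to distinguish the two limits) and wires its `change` event to the model's new `set_token_upper_limit` setter. A minimal, self-contained sketch of that wiring pattern, using a hypothetical `DummyModel` as a stand-in for the repo's `current_model` state:

```python
# Sketch of the slider-to-setter wiring this commit uses. DummyModel is a
# stand-in for illustration only; it is not part of the ChuanhuChatbot code.
import gradio as gr

class DummyModel:
    def __init__(self):
        self.token_upper_limit = 2000

    def set_token_upper_limit(self, new_upper_limit):
        self.token_upper_limit = new_upper_limit
        print(f"token upper limit set to {new_upper_limit}")

model = DummyModel()

with gr.Blocks() as demo:
    max_context_length_slider = gr.Slider(
        minimum=1, maximum=32768, value=2000, step=1,
        interactive=True, label="max context",
    )
    # On every slider move, Gradio calls the setter with the slider's value;
    # outputs=None mirrors the trailing `None` in the commit's wiring.
    max_context_length_slider.change(
        model.set_token_upper_limit, [max_context_length_slider], None
    )

if __name__ == "__main__":
    demo.launch()
```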
modules/base_model.py CHANGED
```diff
@@ -357,6 +357,10 @@ class BaseLLMModel:
     def recover(self):
         self.interrupted = False
 
+    def set_token_upper_limit(self, new_upper_limit):
+        self.token_upper_limit = new_upper_limit
+        print(f"token上限设置为{new_upper_limit}")
+
     def set_temperature(self, new_temperature):
         self.temperature = new_temperature
 
```
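`BaseLLMModel.set_token_upper_limit` stores the new limit and prints `token上限设置为{new_upper_limit}` ("token upper limit set to {new_upper_limit}"). The commit only adds the setter; where `token_upper_limit` is actually enforced is outside this diff. A purely hypothetical sketch (none of these names come from the repo) of the usual way such a limit is consumed, dropping the oldest messages until the conversation fits:

```python
# Hypothetical illustration, not from this commit: trim history from the
# oldest end until the estimated token count fits under the limit.
def truncate_history(history, token_upper_limit, count_tokens):
    """history: list of message strings; count_tokens: callable str -> int."""
    total = sum(count_tokens(m) for m in history)
    while history and total > token_upper_limit:
        total -= count_tokens(history[0])
        history = history[1:]  # drop the oldest message
    return history

# Example with a crude whitespace "tokenizer" as the counter:
msgs = ["hello there", "a much longer message with many words", "ok"]
print(truncate_history(msgs, token_upper_limit=8,
                       count_tokens=lambda s: len(s.split())))
```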
modules/models.py CHANGED
```diff
@@ -97,6 +97,9 @@ class OpenAIClient(BaseLLMModel):
         logging.error(f"获取API使用情况失败:" + str(e))
         return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG
 
+    def set_token_upper_limit(self, new_upper_limit):
+        pass
+
     @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用
     def _get_response(self, stream=False):
         openai_api_key = self.api_key
```
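For context, the surrounding Chinese strings translate as: the `logging.error` message means "Failed to get API usage:", and the decorator comment means "this decorator has no effect when multi-account mode is not enabled". Note that `OpenAIClient` overrides the new setter with a no-op `pass`, so when the current model is the OpenAI client, moving the slider leaves `token_upper_limit` unchanged. A minimal sketch of that override behavior, using pared-down stand-ins for the repo's classes:

```python
# Pared-down stand-ins illustrating the override pattern in this commit:
# the base class stores the new limit, the OpenAI subclass ignores it.
class BaseLLMModel:
    def __init__(self):
        self.token_upper_limit = 2000

    def set_token_upper_limit(self, new_upper_limit):
        self.token_upper_limit = new_upper_limit

class OpenAIClient(BaseLLMModel):
    def set_token_upper_limit(self, new_upper_limit):
        pass  # no-op: slider changes never reach token_upper_limit

m = OpenAIClient()
m.set_token_upper_limit(4096)
print(m.token_upper_limit)  # still 2000: the override swallowed the update
```

Whether the no-op is intentional is not stated in the commit message.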