"""
该文件中主要包含2个函数,是所有LLM的通用接口,它们会继续向下调用更底层的LLM模型,处理多模型并行等细节
不具备多线程能力的函数:正常对话时使用,具备完备的交互功能,不可多线程
1. predict(...)
具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁
2. predict_no_ui_long_connection(...)
"""
import tiktoken
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
from toolbox import get_conf, trimmed_format_exc
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
from .bridge_chatglm import predict as chatglm_ui
from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
from .bridge_qianfan import predict as qianfan_ui
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
class LazyloadTiktoken(object):
def __init__(self, model):
self.model = model
@staticmethod
@lru_cache(maxsize=128)
def get_encoder(model):
        print('Loading the tokenizer; on the first run this may take a moment to download parameters')
        tmp = tiktoken.encoding_for_model(model)
        print('Tokenizer loaded')
return tmp
def encode(self, *args, **kwargs):
encoder = self.get_encoder(self.model)
return encoder.encode(*args, **kwargs)
def decode(self, *args, **kwargs):
encoder = self.get_encoder(self.model)
return encoder.decode(*args, **kwargs)
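# Note: get_encoder is an @lru_cache'd staticmethod, so every LazyloadTiktoken instance wrapping
# the same model name shares one tiktoken encoder and the download cost is paid at most once.
# A minimal usage sketch (this mirrors how the helpers defined further below use it):
#   tokenizer = LazyloadTiktoken("gpt-3.5-turbo")
#   n_tokens = len(tokenizer.encode("hello world", disallowed_special=()))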
# Endpoint redirection
API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "AZURE_ENDPOINT", "AZURE_ENGINE")
openai_endpoint = "https://api.openai.com/v1/chat/completions"
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
# Backward compatibility with the legacy configuration
try:
API_URL, = get_conf("API_URL")
if API_URL != "https://api.openai.com/v1/chat/completions":
openai_endpoint = API_URL
print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
except:
pass
# New-style configuration
if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
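# Illustrative API_URL_REDIRECT entry (a sketch only; the replacement URL is deployment-specific):
#   API_URL_REDIRECT = {
#       "https://api.openai.com/v1/chat/completions": "https://your-proxy.example.com/v1/chat/completions",
#   }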
# Obtain the tokenizers
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
tokenizer_gpt4 = LazyloadTiktoken("gpt-4")
get_token_num_gpt35 = lambda txt: len(tokenizer_gpt35.encode(txt, disallowed_special=()))
get_token_num_gpt4 = lambda txt: len(tokenizer_gpt4.encode(txt, disallowed_special=()))
# Begin model initialisation
AVAIL_LLM_MODELS, LLM_MODEL = get_conf("AVAIL_LLM_MODELS", "LLM_MODEL")
AVAIL_LLM_MODELS = AVAIL_LLM_MODELS + [LLM_MODEL]   # ensure the default model is always selectable
# -=-=-=-=-=-=- The models below were added earliest and are the most stable -=-=-=-=-=-=-
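# Every entry in model_info below follows the same schema (a descriptive note, not enforced anywhere):
#   fn_with_ui    : streaming predict function used by the chat UI (a generator that yields updates)
#   fn_without_ui : blocking predict function used by function plugins (safe to call from worker threads)
#   endpoint      : HTTP/WS endpoint, or None for locally hosted / SDK-based models
#   max_token     : token budget assumed for this model's context window
#   tokenizer     : object exposing encode()/decode()
#   token_cnt     : helper that returns the token count of a string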
model_info = {
# openai
"gpt-3.5-turbo": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-16k": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 1024*16,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-0613": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-16k-0613": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 1024 * 16,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-4": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 8192,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
# azure openai
"azure-gpt-3.5":{
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": azure_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# api_2d
"api2d-gpt-3.5-turbo": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": api2d_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"api2d-gpt-4": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": api2d_endpoint,
"max_token": 8192,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
    # Map "chatglm" directly onto chatglm2
"chatglm": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"chatglm2": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"qianfan": {
"fn_with_ui": qianfan_ui,
"fn_without_ui": qianfan_noui,
"endpoint": None,
"max_token": 2000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
}
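# To register another model, add an entry with the same schema to model_info (the conditional
# blocks below follow exactly this pattern) and list its name in AVAIL_LLM_MODELS in the config.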
# -=-=-=-=-=-=- The models below were added more recently and may carry extra dependencies -=-=-=-=-=-=-
if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
from .bridge_claude import predict_no_ui_long_connection as claude_noui
from .bridge_claude import predict as claude_ui
model_info.update({
"claude-1-100k": {
"fn_with_ui": claude_ui,
"fn_without_ui": claude_noui,
"endpoint": None,
"max_token": 8196,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
model_info.update({
"claude-2": {
"fn_with_ui": claude_ui,
"fn_without_ui": claude_noui,
"endpoint": None,
"max_token": 8196,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "jittorllms_rwkv" in AVAIL_LLM_MODELS:
from .bridge_jittorllms_rwkv import predict_no_ui_long_connection as rwkv_noui
from .bridge_jittorllms_rwkv import predict as rwkv_ui
model_info.update({
"jittorllms_rwkv": {
"fn_with_ui": rwkv_ui,
"fn_without_ui": rwkv_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "jittorllms_llama" in AVAIL_LLM_MODELS:
from .bridge_jittorllms_llama import predict_no_ui_long_connection as llama_noui
from .bridge_jittorllms_llama import predict as llama_ui
model_info.update({
"jittorllms_llama": {
"fn_with_ui": llama_ui,
"fn_without_ui": llama_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "jittorllms_pangualpha" in AVAIL_LLM_MODELS:
from .bridge_jittorllms_pangualpha import predict_no_ui_long_connection as pangualpha_noui
from .bridge_jittorllms_pangualpha import predict as pangualpha_ui
model_info.update({
"jittorllms_pangualpha": {
"fn_with_ui": pangualpha_ui,
"fn_without_ui": pangualpha_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "moss" in AVAIL_LLM_MODELS:
from .bridge_moss import predict_no_ui_long_connection as moss_noui
from .bridge_moss import predict as moss_ui
model_info.update({
"moss": {
"fn_with_ui": moss_ui,
"fn_without_ui": moss_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "stack-claude" in AVAIL_LLM_MODELS:
from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui
from .bridge_stackclaude import predict as claude_ui
model_info.update({
"stack-claude": {
"fn_with_ui": claude_ui,
"fn_without_ui": claude_noui,
"endpoint": None,
"max_token": 8192,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
if "newbing-free" in AVAIL_LLM_MODELS:
try:
from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
from .bridge_newbingfree import predict as newbingfree_ui
model_info.update({
"newbing-free": {
"fn_with_ui": newbingfree_ui,
"fn_without_ui": newbingfree_noui,
"endpoint": newbing_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
try:
from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
from .bridge_newbingfree import predict as newbingfree_ui
model_info.update({
"newbing": {
"fn_with_ui": newbingfree_ui,
"fn_without_ui": newbingfree_noui,
"endpoint": newbing_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
try:
from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui
from .bridge_chatglmft import predict as chatglmft_ui
model_info.update({
"chatglmft": {
"fn_with_ui": chatglmft_ui,
"fn_without_ui": chatglmft_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "internlm" in AVAIL_LLM_MODELS:
try:
from .bridge_internlm import predict_no_ui_long_connection as internlm_noui
from .bridge_internlm import predict as internlm_ui
model_info.update({
"internlm": {
"fn_with_ui": internlm_ui,
"fn_without_ui": internlm_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "chatglm_onnx" in AVAIL_LLM_MODELS:
try:
from .bridge_chatglmonnx import predict_no_ui_long_connection as chatglm_onnx_noui
from .bridge_chatglmonnx import predict as chatglm_onnx_ui
model_info.update({
"chatglm_onnx": {
"fn_with_ui": chatglm_onnx_ui,
"fn_without_ui": chatglm_onnx_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "qwen" in AVAIL_LLM_MODELS:
try:
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
from .bridge_qwen import predict as qwen_ui
model_info.update({
"qwen": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "chatgpt_website" in AVAIL_LLM_MODELS: # 接入一些逆向工程https://github.com/acheong08/ChatGPT-to-API/
try:
from .bridge_chatgpt_website import predict_no_ui_long_connection as chatgpt_website_noui
from .bridge_chatgpt_website import predict as chatgpt_website_ui
model_info.update({
"chatgpt_website": {
"fn_with_ui": chatgpt_website_ui,
"fn_without_ui": chatgpt_website_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "spark" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
try:
from .bridge_spark import predict_no_ui_long_connection as spark_noui
from .bridge_spark import predict as spark_ui
model_info.update({
"spark": {
"fn_with_ui": spark_ui,
"fn_without_ui": spark_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
try:
from .bridge_spark import predict_no_ui_long_connection as spark_noui
from .bridge_spark import predict as spark_ui
model_info.update({
"sparkv2": {
"fn_with_ui": spark_ui,
"fn_without_ui": spark_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "llama2" in AVAIL_LLM_MODELS: # llama2
try:
from .bridge_llama2 import predict_no_ui_long_connection as llama2_noui
from .bridge_llama2 import predict as llama2_ui
model_info.update({
"llama2": {
"fn_with_ui": llama2_ui,
"fn_without_ui": llama2_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
def LLM_CATCH_EXCEPTION(f):
"""
装饰器函数,将错误显示出来
"""
def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
try:
return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
except Exception as e:
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
observe_window[0] = tb_str
return tb_str
return decorated
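# LLM_CATCH_EXCEPTION is used below when one query is fanned out to several models: each worker
# gets its own wrapped predict function, so a failure in one model is rendered into that model's
# observation window instead of aborting the whole ThreadPoolExecutor batch.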
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
"""
发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
inputs:
是本次问询的输入
sys_prompt:
系统静默prompt
llm_kwargs:
LLM的内部调优参数
history:
是之前的对话列表
observe_window = None:
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
"""
import threading, time, copy
model = llm_kwargs['llm_model']
n_model = 1
if '&' not in model:
        assert not model.startswith("tgui"), "TGUI does not support the function-plugin implementation"
        # Querying a single LLM:
method = model_info[model]["fn_without_ui"]
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
else:
        # Querying several LLMs at once. This is slightly more verbose, but the idea is the same; you do not need to read this else branch.
executor = ThreadPoolExecutor(max_workers=4)
models = model.split('&')
n_model = len(models)
window_len = len(observe_window)
assert window_len==3
window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
futures = []
for i in range(n_model):
model = models[i]
method = model_info[model]["fn_without_ui"]
llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
llm_kwargs_feedin['llm_model'] = model
future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
futures.append(future)
def mutex_manager(window_mutex, observe_window):
while True:
time.sleep(0.25)
if not window_mutex[-1]: break
                # Watchdog: relay the caller's heartbeat timestamp to every per-model window
for i in range(n_model):
window_mutex[i][1] = observe_window[1]
                # Observation window: merge the per-model partial outputs into the caller's window
chat_string = []
for i in range(n_model):
chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
res = '<br/><br/>\n\n---\n\n'.join(chat_string)
# # # # # # # # # # #
observe_window[0] = res
t_model = threading.Thread(target=mutex_manager, args=(window_mutex, observe_window), daemon=True)
t_model.start()
return_string_collect = []
while True:
worker_done = [h.done() for h in futures]
if all(worker_done):
executor.shutdown()
break
time.sleep(1)
for i, future in enumerate(futures): # wait and get
return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )
window_mutex[-1] = False # stop mutex thread
res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
return res
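# Illustrative call (a sketch only; in practice llm_kwargs is assembled by the framework from the
# user's config and cookies, so the dict below is simplified):
#   observe_window = ["", time.time()]      # [0] partial output, [1] watchdog timestamp
#   reply = predict_no_ui_long_connection(
#       inputs="Summarize the following paragraph ...",
#       llm_kwargs={'llm_model': 'gpt-3.5-turbo'},       # plus temperature / top_p etc.
#       history=[], sys_prompt="You are a helpful assistant.",
#       observe_window=observe_window)
#   # Multi-model fan-out: pass e.g. 'gpt-3.5-turbo&chatglm' as llm_model together with a
#   # 3-element observe_window; the replies are joined with an <hr>-style separator.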
def predict(inputs, llm_kwargs, *args, **kwargs):
"""
发送至LLM,流式获取输出。
用于基础的对话功能。
inputs 是本次问询的输入
top_p, temperature是LLM的内部调优参数
history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项
yield from method(inputs, llm_kwargs, *args, **kwargs)
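# Illustrative call from the UI layer (a sketch; in practice Gradio drives this generator, and the
# extra positional arguments -- typically plugin_kwargs, chatbot, history, system_prompt -- are
# forwarded unchanged to the selected bridge):
#   for _ in predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
#       pass  # every yield from the underlying bridge refreshes the chat view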