Tuchuanhuhuhu committed
Commit c26dfd8
Parent(s):
d4bd3ba
Fix missing files caused by .gitignore
Files changed (a brief usage sketch for the new modules follows this list):
- .gitignore +1 -1
- modules/models/StableLM.py +95 -0
- modules/models/base_model.py +561 -0
- modules/models/models.py +625 -0
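
For orientation, the sketch below shows how the factory added in modules/models/models.py is intended to be driven from application code. It is a minimal, hypothetical example and not part of this commit: the model name, key placeholder, and prompt are assumptions, and the real Space wires get_model into its Gradio UI instead. For non-LLaMA models get_model returns a (model, message, LoRA-dropdown update) triple, and predict is a generator yielding (chatbot, status_text) pairs.

# Hypothetical usage sketch for the modules added in this commit.
from modules.models.models import get_model

model, msg, _ = get_model(
    model_name="gpt-3.5-turbo",   # routed to OpenAIClient by ModelType.get_type
    access_key="sk-...",          # placeholder, not a real credential
    temperature=0.7,
    top_p=1.0,
    system_prompt="You are a helpful assistant.",
)

chatbot = []
# predict() yields (chatbot, status_text) pairs until the answer is complete.
for chatbot, status_text in model.predict(inputs="Hello!", chatbot=chatbot, stream=False):
    print(status_text)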
.gitignore
CHANGED
@@ -140,7 +140,7 @@ dmypy.json
 api_key.txt
 config.json
 auth.json
-models/
+.models/
 lora/
 .idea
 templates/*
modules/models/StableLM.py
ADDED
@@ -0,0 +1,95 @@
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, StoppingCriteria, StoppingCriteriaList
import time
import numpy as np
from torch.nn import functional as F
import os
from .base_model import BaseLLMModel

class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [50278, 50279, 50277, 1, 0]
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False

class StableLM_Client(BaseLLMModel):
    def __init__(self, model_name) -> None:
        super().__init__(model_name=model_name)
        print(f"Starting to load StableLM to memory")
        self.model = AutoModelForCausalLM.from_pretrained(
            "stabilityai/stablelm-tuned-alpha-7b", torch_dtype=torch.float16).cuda()
        self.tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-7b")
        self.generator = pipeline('text-generation', model=self.model, tokenizer=self.tokenizer, device=0)
        print(f"Successfully loaded StableLM to the memory")
        self.system_prompt = """StableAssistant
- StableAssistant is A helpful and harmless Open Source AI Language Model developed by Stability and CarperAI.
- StableAssistant is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableAssistant is more than just an information source, StableAssistant is also able to write poetry, short stories, and make jokes.
- StableAssistant will refuse to participate in anything that could harm a human."""

    def user(self, user_message, history):
        history = history + [[user_message, ""]]
        return "", history, history

    def bot(self, history, curr_system_message):
        messages = f"<|SYSTEM|># {self.system_prompt}" + \
            "".join(["".join(["<|USER|>"+item[0], "<|ASSISTANT|>"+item[1]])
                    for item in history])
        output = self.generate(messages)
        history[-1][1] = output
        time.sleep(1)
        return history, history

    def _get_stablelm_style_input(self):
        messages = self.system_prompt + \
            "".join(["".join(["<|USER|>"+self.history[i]["content"], "<|ASSISTANT|>"+self.history[i + 1]["content"]])
                    for i in range(0, len(self.history), 2)])
        return messages

    def generate(self, text, bad_text=None):
        stop = StopOnTokens()
        result = self.generator(text, max_new_tokens=1024, num_return_sequences=1, num_beams=1, do_sample=True,
                                temperature=1.0, top_p=0.95, top_k=1000, stopping_criteria=StoppingCriteriaList([stop]))
        return result[0]["generated_text"].replace(text, "")

    def contrastive_generate(self, text, bad_text):
        with torch.no_grad():
            tokens = self.tokenizer(text, return_tensors="pt")[
                'input_ids'].cuda()[:, :4096-1024]
            bad_tokens = self.tokenizer(bad_text, return_tensors="pt")[
                'input_ids'].cuda()[:, :4096-1024]
            history = None
            bad_history = None
            curr_output = list()
            for i in range(1024):
                out = self.model(tokens, past_key_values=history, use_cache=True)
                logits = out.logits
                history = out.past_key_values
                bad_out = self.model(bad_tokens, past_key_values=bad_history,
                                     use_cache=True)
                bad_logits = bad_out.logits
                bad_history = bad_out.past_key_values
                probs = F.softmax(logits.float(), dim=-1)[0][-1].cpu()
                bad_probs = F.softmax(bad_logits.float(), dim=-1)[0][-1].cpu()
                logits = torch.log(probs)
                bad_logits = torch.log(bad_probs)
                logits[probs > 0.1] = logits[probs > 0.1] - bad_logits[probs > 0.1]
                probs = F.softmax(logits)
                out = int(torch.multinomial(probs, 1))
                if out in [50278, 50279, 50277, 1, 0]:
                    break
                else:
                    curr_output.append(out)
                out = np.array([out])
                tokens = torch.from_numpy(np.array([out])).to(
                    tokens.device)
                bad_tokens = torch.from_numpy(np.array([out])).to(
                    tokens.device)
            return self.tokenizer.decode(curr_output)

    def get_answer_at_once(self):
        messages = self._get_stablelm_style_input()
        return self.generate(messages)
modules/models/base_model.py
ADDED
@@ -0,0 +1,561 @@
from __future__ import annotations
from typing import TYPE_CHECKING, List

import logging
import json
import commentjson as cjson
import os
import sys
import requests
import urllib3
import traceback

from tqdm import tqdm
import colorama
from duckduckgo_search import ddg
import asyncio
import aiohttp
from enum import Enum

from ..presets import *
from ..llama_func import *
from ..utils import *
from .. import shared
from ..config import retrieve_proxy


class ModelType(Enum):
    Unknown = -1
    OpenAI = 0
    ChatGLM = 1
    LLaMA = 2
    XMChat = 3

    @classmethod
    def get_type(cls, model_name: str):
        model_type = None
        model_name_lower = model_name.lower()
        if "gpt" in model_name_lower:
            model_type = ModelType.OpenAI
        elif "chatglm" in model_name_lower:
            model_type = ModelType.ChatGLM
        elif "llama" in model_name_lower or "alpaca" in model_name_lower:
            model_type = ModelType.LLaMA
        elif "xmchat" in model_name_lower:
            model_type = ModelType.XMChat
        else:
            model_type = ModelType.Unknown
        return model_type


class BaseLLMModel:
    def __init__(
        self,
        model_name,
        system_prompt="",
        temperature=1.0,
        top_p=1.0,
        n_choices=1,
        stop=None,
        max_generation_token=None,
        presence_penalty=0,
        frequency_penalty=0,
        logit_bias=None,
        user="",
    ) -> None:
        self.history = []
        self.all_token_counts = []
        self.model_name = model_name
        self.model_type = ModelType.get_type(model_name)
        try:
            self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
        except KeyError:
            self.token_upper_limit = DEFAULT_TOKEN_LIMIT
        self.interrupted = False
        self.system_prompt = system_prompt
        self.api_key = None
        self.need_api_key = False
        self.single_turn = False

        self.temperature = temperature
        self.top_p = top_p
        self.n_choices = n_choices
        self.stop_sequence = stop
        self.max_generation_token = None
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.logit_bias = logit_bias
        self.user_identifier = user

    def get_answer_stream_iter(self):
        """stream predict, needs to be implemented
        conversations are stored in self.history, with the most recent question, in OpenAI format
        should return a generator, each time give the next word (str) in the answer
        """
        logging.warning("stream predict not implemented, using at once predict instead")
        response, _ = self.get_answer_at_once()
        yield response

    def get_answer_at_once(self):
        """predict at once, needs to be implemented
        conversations are stored in self.history, with the most recent question, in OpenAI format
        Should return:
        the answer (str)
        total token count (int)
        """
        logging.warning("at once predict not implemented, using stream predict instead")
        response_iter = self.get_answer_stream_iter()
        count = 0
        for response in response_iter:
            count += 1
        return response, sum(self.all_token_counts) + count

    def billing_info(self):
        """get billing information, implement if needed"""
        logging.warning("billing info not implemented, using default")
        return BILLING_NOT_APPLICABLE_MSG

    def count_token(self, user_input):
        """get token count from input, implement if needed"""
        logging.warning("token count not implemented, using default")
        return len(user_input)

    def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
        def get_return_value():
            return chatbot, status_text

        status_text = i18n("开始实时传输回答……")
        if fake_input:
            chatbot.append((fake_input, ""))
        else:
            chatbot.append((inputs, ""))

        user_token_count = self.count_token(inputs)
        self.all_token_counts.append(user_token_count)
        logging.debug(f"输入token计数: {user_token_count}")

        stream_iter = self.get_answer_stream_iter()

        for partial_text in stream_iter:
            chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
            self.all_token_counts[-1] += 1
            status_text = self.token_message()
            yield get_return_value()
            if self.interrupted:
                self.recover()
                break
        self.history.append(construct_assistant(partial_text))

    def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
        if fake_input:
            chatbot.append((fake_input, ""))
        else:
            chatbot.append((inputs, ""))
        if fake_input is not None:
            user_token_count = self.count_token(fake_input)
        else:
            user_token_count = self.count_token(inputs)
        self.all_token_counts.append(user_token_count)
        ai_reply, total_token_count = self.get_answer_at_once()
        self.history.append(construct_assistant(ai_reply))
        if fake_input is not None:
            self.history[-2] = construct_user(fake_input)
        chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
        if fake_input is not None:
            self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
        else:
            self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
        status_text = self.token_message()
        return chatbot, status_text

    def handle_file_upload(self, files, chatbot):
        """if the model accepts multi modal input, implement this function"""
        status = gr.Markdown.update()
        if files:
            construct_index(self.api_key, file_src=files)
            status = "索引构建完成"
        return gr.Files.update(), chatbot, status

    def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
        fake_inputs = None
        display_append = []
        limited_context = False
        fake_inputs = real_inputs
        if files:
            from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
            from llama_index.indices.query.schema import QueryBundle
            from langchain.embeddings.huggingface import HuggingFaceEmbeddings
            from langchain.chat_models import ChatOpenAI
            from llama_index import (
                GPTSimpleVectorIndex,
                ServiceContext,
                LangchainEmbedding,
                OpenAIEmbedding,
            )
            limited_context = True
            msg = "加载索引中……"
            logging.info(msg)
            # yield chatbot + [(inputs, "")], msg
            index = construct_index(self.api_key, file_src=files)
            assert index is not None, "获取索引失败"
            msg = "索引获取成功,生成回答中……"
            logging.info(msg)
            if local_embedding or self.model_type != ModelType.OpenAI:
                embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
            else:
                embed_model = OpenAIEmbedding()
            # yield chatbot + [(inputs, "")], msg
            with retrieve_proxy():
                prompt_helper = PromptHelper(
                    max_input_size=4096,
                    num_output=5,
                    max_chunk_overlap=20,
                    chunk_size_limit=600,
                )
                from llama_index import ServiceContext

                service_context = ServiceContext.from_defaults(
                    prompt_helper=prompt_helper, embed_model=embed_model
                )
                query_object = GPTVectorStoreIndexQuery(
                    index.index_struct,
                    service_context=service_context,
                    similarity_top_k=5,
                    vector_store=index._vector_store,
                    docstore=index._docstore,
                )
                query_bundle = QueryBundle(real_inputs)
                nodes = query_object.retrieve(query_bundle)
            reference_results = [n.node.text for n in nodes]
            reference_results = add_source_numbers(reference_results, use_source=False)
            display_append = add_details(reference_results)
            display_append = "\n\n" + "".join(display_append)
            real_inputs = (
                replace_today(PROMPT_TEMPLATE)
                .replace("{query_str}", real_inputs)
                .replace("{context_str}", "\n\n".join(reference_results))
                .replace("{reply_language}", reply_language)
            )
        elif use_websearch:
            limited_context = True
            search_results = ddg(real_inputs, max_results=5)
            reference_results = []
            for idx, result in enumerate(search_results):
                logging.debug(f"搜索结果{idx + 1}:{result}")
                domain_name = urllib3.util.parse_url(result["href"]).host
                reference_results.append([result["body"], result["href"]])
                display_append.append(
                    # f"{idx+1}. [{domain_name}]({result['href']})\n"
                    f"<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n"
                )
            reference_results = add_source_numbers(reference_results)
            display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
            real_inputs = (
                replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
                .replace("{query}", real_inputs)
                .replace("{web_results}", "\n\n".join(reference_results))
                .replace("{reply_language}", reply_language)
            )
        else:
            display_append = ""
        return limited_context, fake_inputs, display_append, real_inputs, chatbot

    def predict(
        self,
        inputs,
        chatbot,
        stream=False,
        use_websearch=False,
        files=None,
        reply_language="中文",
        should_check_token_count=True,
    ):  # repetition_penalty, top_k

        status_text = "开始生成回答……"
        logging.info(
            "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
        )
        if should_check_token_count:
            yield chatbot + [(inputs, "")], status_text
        if reply_language == "跟随问题语言(不稳定)":
            reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."

        limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
        yield chatbot + [(fake_inputs, "")], status_text

        if (
            self.need_api_key and
            self.api_key is None
            and not shared.state.multi_api_key
        ):
            status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
            logging.info(status_text)
            chatbot.append((inputs, ""))
            if len(self.history) == 0:
                self.history.append(construct_user(inputs))
                self.history.append("")
                self.all_token_counts.append(0)
            else:
                self.history[-2] = construct_user(inputs)
            yield chatbot + [(inputs, "")], status_text
            return
        elif len(inputs.strip()) == 0:
            status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
            logging.info(status_text)
            yield chatbot + [(inputs, "")], status_text
            return

        if self.single_turn:
            self.history = []
            self.all_token_counts = []
        self.history.append(construct_user(inputs))

        try:
            if stream:
                logging.debug("使用流式传输")
                iter = self.stream_next_chatbot(
                    inputs,
                    chatbot,
                    fake_input=fake_inputs,
                    display_append=display_append,
                )
                for chatbot, status_text in iter:
                    yield chatbot, status_text
            else:
                logging.debug("不使用流式传输")
                chatbot, status_text = self.next_chatbot_at_once(
                    inputs,
                    chatbot,
                    fake_input=fake_inputs,
                    display_append=display_append,
                )
                yield chatbot, status_text
        except Exception as e:
            traceback.print_exc()
            status_text = STANDARD_ERROR_MSG + str(e)
            yield chatbot, status_text

        if len(self.history) > 1 and self.history[-1]["content"] != inputs:
            logging.info(
                "回答为:"
                + colorama.Fore.BLUE
                + f"{self.history[-1]['content']}"
                + colorama.Style.RESET_ALL
            )

        if limited_context:
            # self.history = self.history[-4:]
            # self.all_token_counts = self.all_token_counts[-2:]
            self.history = []
            self.all_token_counts = []

        max_token = self.token_upper_limit - TOKEN_OFFSET

        if sum(self.all_token_counts) > max_token and should_check_token_count:
            count = 0
            while (
                sum(self.all_token_counts)
                > self.token_upper_limit * REDUCE_TOKEN_FACTOR
                and sum(self.all_token_counts) > 0
            ):
                count += 1
                del self.all_token_counts[0]
                del self.history[:2]
            logging.info(status_text)
            status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
            yield chatbot, status_text

    def retry(
        self,
        chatbot,
        stream=False,
        use_websearch=False,
        files=None,
        reply_language="中文",
    ):
        logging.debug("重试中……")
        if len(self.history) > 0:
            inputs = self.history[-2]["content"]
            del self.history[-2:]
            self.all_token_counts.pop()
        elif len(chatbot) > 0:
            inputs = chatbot[-1][0]
        else:
            yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
            return

        iter = self.predict(
            inputs,
            chatbot,
            stream=stream,
            use_websearch=use_websearch,
            files=files,
            reply_language=reply_language,
        )
        for x in iter:
            yield x
        logging.debug("重试完毕")

    # def reduce_token_size(self, chatbot):
    #     logging.info("开始减少token数量……")
    #     chatbot, status_text = self.next_chatbot_at_once(
    #         summarize_prompt,
    #         chatbot
    #     )
    #     max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
    #     num_chat = find_n(self.all_token_counts, max_token_count)
    #     logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
    #     chatbot = chatbot[:-1]
    #     self.history = self.history[-2*num_chat:] if num_chat > 0 else []
    #     self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
    #     msg = f"保留了最近{num_chat}轮对话"
    #     logging.info(msg)
    #     logging.info("减少token数量完毕")
    #     return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])

    def interrupt(self):
        self.interrupted = True

    def recover(self):
        self.interrupted = False

    def set_token_upper_limit(self, new_upper_limit):
        self.token_upper_limit = new_upper_limit
        print(f"token上限设置为{new_upper_limit}")

    def set_temperature(self, new_temperature):
        self.temperature = new_temperature

    def set_top_p(self, new_top_p):
        self.top_p = new_top_p

    def set_n_choices(self, new_n_choices):
        self.n_choices = new_n_choices

    def set_stop_sequence(self, new_stop_sequence: str):
        new_stop_sequence = new_stop_sequence.split(",")
        self.stop_sequence = new_stop_sequence

    def set_max_tokens(self, new_max_tokens):
        self.max_generation_token = new_max_tokens

    def set_presence_penalty(self, new_presence_penalty):
        self.presence_penalty = new_presence_penalty

    def set_frequency_penalty(self, new_frequency_penalty):
        self.frequency_penalty = new_frequency_penalty

    def set_logit_bias(self, logit_bias):
        logit_bias = logit_bias.split()
        bias_map = {}
        encoding = tiktoken.get_encoding("cl100k_base")
        for line in logit_bias:
            word, bias_amount = line.split(":")
            if word:
                for token in encoding.encode(word):
                    bias_map[token] = float(bias_amount)
        self.logit_bias = bias_map

    def set_user_identifier(self, new_user_identifier):
        self.user_identifier = new_user_identifier

    def set_system_prompt(self, new_system_prompt):
        self.system_prompt = new_system_prompt

    def set_key(self, new_access_key):
        self.api_key = new_access_key.strip()
        msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
        logging.info(msg)
        return self.api_key, msg

    def set_single_turn(self, new_single_turn):
        self.single_turn = new_single_turn

    def reset(self):
        self.history = []
        self.all_token_counts = []
        self.interrupted = False
        return [], self.token_message([0])

    def delete_first_conversation(self):
        if self.history:
            del self.history[:2]
            del self.all_token_counts[0]
        return self.token_message()

    def delete_last_conversation(self, chatbot):
        if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
            msg = "由于包含报错信息,只删除chatbot记录"
            chatbot.pop()
            return chatbot, self.history
        if len(self.history) > 0:
            self.history.pop()
            self.history.pop()
        if len(chatbot) > 0:
            msg = "删除了一组chatbot对话"
            chatbot.pop()
        if len(self.all_token_counts) > 0:
            msg = "删除了一组对话的token计数记录"
            self.all_token_counts.pop()
        msg = "删除了一组对话"
        return chatbot, msg

    def token_message(self, token_lst=None):
        if token_lst is None:
            token_lst = self.all_token_counts
        token_sum = 0
        for i in range(len(token_lst)):
            token_sum += sum(token_lst[: i + 1])
        return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"

    def save_chat_history(self, filename, chatbot, user_name):
        if filename == "":
            return
        if not filename.endswith(".json"):
            filename += ".json"
        return save_file(filename, self.system_prompt, self.history, chatbot, user_name)

    def export_markdown(self, filename, chatbot, user_name):
        if filename == "":
            return
        if not filename.endswith(".md"):
            filename += ".md"
        return save_file(filename, self.system_prompt, self.history, chatbot, user_name)

    def load_chat_history(self, filename, chatbot, user_name):
        logging.debug(f"{user_name} 加载对话历史中……")
        if type(filename) != str:
            filename = filename.name
        try:
            with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f:
                json_s = json.load(f)
            try:
                if type(json_s["history"][0]) == str:
                    logging.info("历史记录格式为旧版,正在转换……")
                    new_history = []
                    for index, item in enumerate(json_s["history"]):
                        if index % 2 == 0:
                            new_history.append(construct_user(item))
                        else:
                            new_history.append(construct_assistant(item))
                    json_s["history"] = new_history
                    logging.info(new_history)
            except:
                # 没有对话历史
                pass
            logging.debug(f"{user_name} 加载对话历史完毕")
            self.history = json_s["history"]
            return filename, json_s["system"], json_s["chatbot"]
        except FileNotFoundError:
            logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
            return filename, self.system_prompt, chatbot

    def like(self):
        """like the last response, implement if needed
        """
        return gr.update()

    def dislike(self):
        """dislike the last response, implement if needed
        """
        return gr.update()
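
The base class above expects subclasses to implement get_answer_at_once and/or get_answer_stream_iter, as its docstrings describe. The sketch below is a hypothetical minimal backend (EchoClient is not part of this commit) that only illustrates that contract: read the latest question from self.history, which holds OpenAI-style message dicts, and return (answer_text, total_token_count). Because get_answer_stream_iter falls back to get_answer_at_once and vice versa, implementing either one is enough for both the streaming and non-streaming predict paths.

# Hypothetical minimal subclass, assumed to live in modules/models/ next to base_model.py.
from .base_model import BaseLLMModel


class EchoClient(BaseLLMModel):
    def __init__(self, model_name) -> None:
        super().__init__(model_name=model_name)

    def get_answer_at_once(self):
        # self.history[-1] is the most recent user message in OpenAI format.
        question = self.history[-1]["content"]
        answer = f"You said: {question}"
        return answer, len(answer)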
modules/models/models.py
ADDED
@@ -0,0 +1,625 @@
from __future__ import annotations
from typing import TYPE_CHECKING, List

import logging
import json
import commentjson as cjson
import os
import sys
import requests
import urllib3
import platform
import base64
from io import BytesIO
from PIL import Image

from tqdm import tqdm
import colorama
from duckduckgo_search import ddg
import asyncio
import aiohttp
from enum import Enum
import uuid

from ..presets import *
from ..llama_func import *
from ..utils import *
from .. import shared
from ..config import retrieve_proxy
from modules import config
from .base_model import BaseLLMModel, ModelType


class OpenAIClient(BaseLLMModel):
    def __init__(
        self,
        model_name,
        api_key,
        system_prompt=INITIAL_SYSTEM_PROMPT,
        temperature=1.0,
        top_p=1.0,
    ) -> None:
        super().__init__(
            model_name=model_name,
            temperature=temperature,
            top_p=top_p,
            system_prompt=system_prompt,
        )
        self.api_key = api_key
        self.need_api_key = True
        self._refresh_header()

    def get_answer_stream_iter(self):
        response = self._get_response(stream=True)
        if response is not None:
            iter = self._decode_chat_response(response)
            partial_text = ""
            for i in iter:
                partial_text += i
                yield partial_text
        else:
            yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG

    def get_answer_at_once(self):
        response = self._get_response()
        response = json.loads(response.text)
        content = response["choices"][0]["message"]["content"]
        total_token_count = response["usage"]["total_tokens"]
        return content, total_token_count

    def count_token(self, user_input):
        input_token_count = count_token(construct_user(user_input))
        if self.system_prompt is not None and len(self.all_token_counts) == 0:
            system_prompt_token_count = count_token(
                construct_system(self.system_prompt)
            )
            return input_token_count + system_prompt_token_count
        return input_token_count

    def billing_info(self):
        try:
            curr_time = datetime.datetime.now()
            last_day_of_month = get_last_day_of_month(
                curr_time).strftime("%Y-%m-%d")
            first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d")
            usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}"
            try:
                usage_data = self._get_billing_data(usage_url)
            except Exception as e:
                logging.error(f"获取API使用情况失败:" + str(e))
                return i18n("**获取API使用情况失败**")
            rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100)
            return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
        except requests.exceptions.ConnectTimeout:
            status_text = (
                STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
            )
            return status_text
        except requests.exceptions.ReadTimeout:
            status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
            return status_text
        except Exception as e:
            import traceback
            traceback.print_exc()
            logging.error(i18n("获取API使用情况失败:") + str(e))
            return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG

    def set_token_upper_limit(self, new_upper_limit):
        pass

    @shared.state.switching_api_key  # 在不开启多账号模式的时候,这个装饰器不会起作用
    def _get_response(self, stream=False):
        openai_api_key = self.api_key
        system_prompt = self.system_prompt
        history = self.history
        logging.debug(colorama.Fore.YELLOW +
                      f"{history}" + colorama.Fore.RESET)
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {openai_api_key}",
        }

        if system_prompt is not None:
            history = [construct_system(system_prompt), *history]

        payload = {
            "model": self.model_name,
            "messages": history,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "n": self.n_choices,
            "stream": stream,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
        }

        if self.max_generation_token is not None:
            payload["max_tokens"] = self.max_generation_token
        if self.stop_sequence is not None:
            payload["stop"] = self.stop_sequence
        if self.logit_bias is not None:
            payload["logit_bias"] = self.logit_bias
        if self.user_identifier is not None:
            payload["user"] = self.user_identifier

        if stream:
            timeout = TIMEOUT_STREAMING
        else:
            timeout = TIMEOUT_ALL

        # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求
        if shared.state.completion_url != COMPLETION_URL:
            logging.info(f"使用自定义API URL: {shared.state.completion_url}")

        with retrieve_proxy():
            try:
                response = requests.post(
                    shared.state.completion_url,
                    headers=headers,
                    json=payload,
                    stream=stream,
                    timeout=timeout,
                )
            except:
                return None
        return response

    def _refresh_header(self):
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

    def _get_billing_data(self, billing_url):
        with retrieve_proxy():
            response = requests.get(
                billing_url,
                headers=self.headers,
                timeout=TIMEOUT_ALL,
            )

        if response.status_code == 200:
            data = response.json()
            return data
        else:
            raise Exception(
                f"API request failed with status code {response.status_code}: {response.text}"
            )

    def _decode_chat_response(self, response):
        error_msg = ""
        for chunk in response.iter_lines():
            if chunk:
                chunk = chunk.decode()
                chunk_length = len(chunk)
                try:
                    chunk = json.loads(chunk[6:])
                except json.JSONDecodeError:
                    print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}")
                    error_msg += chunk
                    continue
                if chunk_length > 6 and "delta" in chunk["choices"][0]:
                    if chunk["choices"][0]["finish_reason"] == "stop":
                        break
                    try:
                        yield chunk["choices"][0]["delta"]["content"]
                    except Exception as e:
                        # logging.error(f"Error: {e}")
                        continue
        if error_msg:
            raise Exception(error_msg)

    def set_key(self, new_access_key):
        ret = super().set_key(new_access_key)
        self._refresh_header()
        return ret


class ChatGLM_Client(BaseLLMModel):
    def __init__(self, model_name) -> None:
        super().__init__(model_name=model_name)
        from transformers import AutoTokenizer, AutoModel
        import torch
        global CHATGLM_TOKENIZER, CHATGLM_MODEL
        if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None:
            system_name = platform.system()
            model_path = None
            if os.path.exists("models"):
                model_dirs = os.listdir("models")
                if model_name in model_dirs:
                    model_path = f"models/{model_name}"
            if model_path is not None:
                model_source = model_path
            else:
                model_source = f"THUDM/{model_name}"
            CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained(
                model_source, trust_remote_code=True
            )
            quantified = False
            if "int4" in model_name:
                quantified = True
            model = AutoModel.from_pretrained(
                model_source, trust_remote_code=True
            )
            if torch.cuda.is_available():
                # run on CUDA
                logging.info("CUDA is available, using CUDA")
                model = model.half().cuda()
            # mps加速还存在一些问题,暂时不使用
            elif system_name == "Darwin" and model_path is not None and not quantified:
                logging.info("Running on macOS, using MPS")
                # running on macOS and model already downloaded
                model = model.half().to("mps")
            else:
                logging.info("GPU is not available, using CPU")
                model = model.float()
            model = model.eval()
            CHATGLM_MODEL = model

    def _get_glm_style_input(self):
        history = [x["content"] for x in self.history]
        query = history.pop()
        logging.debug(colorama.Fore.YELLOW +
                      f"{history}" + colorama.Fore.RESET)
        assert (
            len(history) % 2 == 0
        ), f"History should be even length. current history is: {history}"
        history = [[history[i], history[i + 1]]
                   for i in range(0, len(history), 2)]
        return history, query

    def get_answer_at_once(self):
        history, query = self._get_glm_style_input()
        response, _ = CHATGLM_MODEL.chat(
            CHATGLM_TOKENIZER, query, history=history)
        return response, len(response)

    def get_answer_stream_iter(self):
        history, query = self._get_glm_style_input()
        for response, history in CHATGLM_MODEL.stream_chat(
            CHATGLM_TOKENIZER,
            query,
            history,
            max_length=self.token_upper_limit,
            top_p=self.top_p,
            temperature=self.temperature,
        ):
            yield response


class LLaMA_Client(BaseLLMModel):
    def __init__(
        self,
        model_name,
        lora_path=None,
    ) -> None:
        super().__init__(model_name=model_name)
        from lmflow.datasets.dataset import Dataset
        from lmflow.pipeline.auto_pipeline import AutoPipeline
        from lmflow.models.auto_model import AutoModel
        from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments

        self.max_generation_token = 1000
        self.end_string = "\n\n"
        # We don't need input data
        data_args = DatasetArguments(dataset_path=None)
        self.dataset = Dataset(data_args)
        self.system_prompt = ""

        global LLAMA_MODEL, LLAMA_INFERENCER
        if LLAMA_MODEL is None or LLAMA_INFERENCER is None:
            model_path = None
            if os.path.exists("models"):
                model_dirs = os.listdir("models")
                if model_name in model_dirs:
                    model_path = f"models/{model_name}"
            if model_path is not None:
                model_source = model_path
            else:
                model_source = f"decapoda-research/{model_name}"
                # raise Exception(f"models目录下没有这个模型: {model_name}")
            if lora_path is not None:
                lora_path = f"lora/{lora_path}"
            model_args = ModelArguments(model_name_or_path=model_source, lora_model_path=lora_path, model_type=None, config_overrides=None, config_name=None, tokenizer_name=None, cache_dir=None,
                                        use_fast_tokenizer=True, model_revision='main', use_auth_token=False, torch_dtype=None, use_lora=False, lora_r=8, lora_alpha=32, lora_dropout=0.1, use_ram_optimized_load=True)
            pipeline_args = InferencerArguments(
                local_rank=0, random_seed=1, deepspeed='configs/ds_config_chatbot.json', mixed_precision='bf16')

            with open(pipeline_args.deepspeed, "r") as f:
                ds_config = json.load(f)
            LLAMA_MODEL = AutoModel.get_model(
                model_args,
                tune_strategy="none",
                ds_config=ds_config,
            )
            LLAMA_INFERENCER = AutoPipeline.get_pipeline(
                pipeline_name="inferencer",
                model_args=model_args,
                data_args=data_args,
                pipeline_args=pipeline_args,
            )

    def _get_llama_style_input(self):
        history = []
        instruction = ""
        if self.system_prompt:
            instruction = (f"Instruction: {self.system_prompt}\n")
        for x in self.history:
            if x["role"] == "user":
                history.append(f"{instruction}Input: {x['content']}")
            else:
                history.append(f"Output: {x['content']}")
        context = "\n\n".join(history)
        context += "\n\nOutput: "
        return context

    def get_answer_at_once(self):
        context = self._get_llama_style_input()

        input_dataset = self.dataset.from_dict(
            {"type": "text_only", "instances": [{"text": context}]}
        )

        output_dataset = LLAMA_INFERENCER.inference(
            model=LLAMA_MODEL,
            dataset=input_dataset,
            max_new_tokens=self.max_generation_token,
            temperature=self.temperature,
        )

        response = output_dataset.to_dict()["instances"][0]["text"]
        return response, len(response)

    def get_answer_stream_iter(self):
        context = self._get_llama_style_input()
        partial_text = ""
        step = 1
        for _ in range(0, self.max_generation_token, step):
            input_dataset = self.dataset.from_dict(
                {"type": "text_only", "instances": [
                    {"text": context + partial_text}]}
            )
            output_dataset = LLAMA_INFERENCER.inference(
                model=LLAMA_MODEL,
                dataset=input_dataset,
                max_new_tokens=step,
                temperature=self.temperature,
            )
            response = output_dataset.to_dict()["instances"][0]["text"]
            if response == "" or response == self.end_string:
                break
            partial_text += response
            yield partial_text


class XMChat(BaseLLMModel):
    def __init__(self, api_key):
        super().__init__(model_name="xmchat")
        self.api_key = api_key
        self.session_id = None
        self.reset()
        self.image_bytes = None
        self.image_path = None
        self.xm_history = []
        self.url = "https://xmbot.net/web"
        self.last_conv_id = None

    def reset(self):
        self.session_id = str(uuid.uuid4())
        self.last_conv_id = None
        return [], "已重置"

    def image_to_base64(self, image_path):
        # 打开并加载图片
        img = Image.open(image_path)

        # 获取图片的宽度和高度
        width, height = img.size

        # 计算压缩比例,以确保最长边小于4096像素
        max_dimension = 2048
        scale_ratio = min(max_dimension / width, max_dimension / height)

        if scale_ratio < 1:
            # 按压缩比例调整图片大小
            new_width = int(width * scale_ratio)
            new_height = int(height * scale_ratio)
            img = img.resize((new_width, new_height), Image.ANTIALIAS)

        # 将图片转换为jpg格式的二进制数据
        buffer = BytesIO()
        if img.mode == "RGBA":
            img = img.convert("RGB")
        img.save(buffer, format='JPEG')
        binary_image = buffer.getvalue()

        # 对二进制数据进行Base64编码
        base64_image = base64.b64encode(binary_image).decode('utf-8')

        return base64_image

    def try_read_image(self, filepath):
        def is_image_file(filepath):
            # 判断文件是否为图片
            valid_image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"]
            file_extension = os.path.splitext(filepath)[1].lower()
            return file_extension in valid_image_extensions

        if is_image_file(filepath):
            logging.info(f"读取图片文件: {filepath}")
            self.image_bytes = self.image_to_base64(filepath)
            self.image_path = filepath
        else:
            self.image_bytes = None
            self.image_path = None

    def like(self):
        if self.last_conv_id is None:
            return "点赞失败,你还没发送过消息"
        data = {
            "uuid": self.last_conv_id,
            "appraise": "good"
        }
        response = requests.post(self.url, json=data)
        return "👍点赞成功,,感谢反馈~"

    def dislike(self):
        if self.last_conv_id is None:
            return "点踩失败,你还没发送过消息"
        data = {
            "uuid": self.last_conv_id,
            "appraise": "bad"
        }
        response = requests.post(self.url, json=data)
        return "👎点踩成功,感谢反馈~"

    def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
        fake_inputs = real_inputs
        display_append = ""
        limited_context = False
        return limited_context, fake_inputs, display_append, real_inputs, chatbot

    def handle_file_upload(self, files, chatbot):
        """if the model accepts multi modal input, implement this function"""
        if files:
            for file in files:
                if file.name:
                    logging.info(f"尝试读取图像: {file.name}")
                    self.try_read_image(file.name)
            if self.image_path is not None:
                chatbot = chatbot + [((self.image_path,), None)]
            if self.image_bytes is not None:
                logging.info("使用图片作为输入")
                # XMChat的一轮对话中实际上只能处理一张图片
                self.reset()
                conv_id = str(uuid.uuid4())
                data = {
                    "user_id": self.api_key,
                    "session_id": self.session_id,
                    "uuid": conv_id,
                    "data_type": "imgbase64",
                    "data": self.image_bytes
                }
                response = requests.post(self.url, json=data)
                response = json.loads(response.text)
                logging.info(f"图片回复: {response['data']}")
        return None, chatbot, None

    def get_answer_at_once(self):
        question = self.history[-1]["content"]
        conv_id = str(uuid.uuid4())
        self.last_conv_id = conv_id
        data = {
            "user_id": self.api_key,
            "session_id": self.session_id,
            "uuid": conv_id,
            "data_type": "text",
            "data": question
        }
        response = requests.post(self.url, json=data)
        try:
            response = json.loads(response.text)
            return response["data"], len(response["data"])
        except Exception as e:
            return response.text, len(response.text)


def get_model(
    model_name,
    lora_model_path=None,
    access_key=None,
    temperature=None,
    top_p=None,
    system_prompt=None,
) -> BaseLLMModel:
    msg = i18n("模型设置为了:") + f" {model_name}"
    model_type = ModelType.get_type(model_name)
    lora_selector_visibility = False
    lora_choices = []
    dont_change_lora_selector = False
    if model_type != ModelType.OpenAI:
        config.local_embedding = True
    # del current_model.model
    model = None
    try:
        if model_type == ModelType.OpenAI:
            logging.info(f"正在加载OpenAI模型: {model_name}")
            model = OpenAIClient(
                model_name=model_name,
                api_key=access_key,
                system_prompt=system_prompt,
                temperature=temperature,
                top_p=top_p,
            )
        elif model_type == ModelType.ChatGLM:
            logging.info(f"正在加载ChatGLM模型: {model_name}")
            model = ChatGLM_Client(model_name)
        elif model_type == ModelType.LLaMA and lora_model_path == "":
            msg = f"现在请为 {model_name} 选择LoRA模型"
            logging.info(msg)
            lora_selector_visibility = True
            if os.path.isdir("lora"):
                lora_choices = get_file_names(
                    "lora", plain=True, filetypes=[""])
            lora_choices = ["No LoRA"] + lora_choices
        elif model_type == ModelType.LLaMA and lora_model_path != "":
            logging.info(f"正在加载LLaMA模型: {model_name} + {lora_model_path}")
            dont_change_lora_selector = True
            if lora_model_path == "No LoRA":
                lora_model_path = None
                msg += " + No LoRA"
            else:
                msg += f" + {lora_model_path}"
            model = LLaMA_Client(model_name, lora_model_path)
        elif model_type == ModelType.XMChat:
            if os.environ.get("XMCHAT_API_KEY") != "":
                access_key = os.environ.get("XMCHAT_API_KEY")
            model = XMChat(api_key=access_key)
        elif model_type == ModelType.Unknown:
            raise ValueError(f"未知模型: {model_name}")
        logging.info(msg)
    except Exception as e:
        logging.error(e)
        msg = f"{STANDARD_ERROR_MSG}: {e}"
    if dont_change_lora_selector:
        return model, msg
    else:
        return model, msg, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility)


if __name__ == "__main__":
    with open("config.json", "r") as f:
        openai_api_key = cjson.load(f)["openai_api_key"]
    # set logging level to debug
    logging.basicConfig(level=logging.DEBUG)
    # client = ModelManager(model_name="gpt-3.5-turbo", access_key=openai_api_key)
    client = get_model(model_name="chatglm-6b-int4")
    chatbot = []
    stream = False
    # 测试账单功能
    logging.info(colorama.Back.GREEN + "测试账单功能" + colorama.Back.RESET)
    logging.info(client.billing_info())
    # 测试问答
    logging.info(colorama.Back.GREEN + "测试问答" + colorama.Back.RESET)
    question = "巴黎是中国的首都吗?"
    for i in client.predict(inputs=question, chatbot=chatbot, stream=stream):
        logging.info(i)
    logging.info(f"测试问答后history : {client.history}")
    # 测试记忆力
    logging.info(colorama.Back.GREEN + "测试记忆力" + colorama.Back.RESET)
    question = "我刚刚问了你什么问题?"
    for i in client.predict(inputs=question, chatbot=chatbot, stream=stream):
        logging.info(i)
    logging.info(f"测试记忆力后history : {client.history}")
    # 测试重试功能
    logging.info(colorama.Back.GREEN + "测试重试功能" + colorama.Back.RESET)
    for i in client.retry(chatbot=chatbot, stream=stream):
        logging.info(i)
    logging.info(f"重试后history : {client.history}")
    # # 测试总结功能
    # print(colorama.Back.GREEN + "测试总结功能" + colorama.Back.RESET)
    # chatbot, msg = client.reduce_token_size(chatbot=chatbot)
    # print(chatbot, msg)
    # print(f"总结后history: {client.history}")