Spaces: Running on T4

Implement basic chat mode #1
by cryscan - opened
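In brief, this PR adds a second "Chat" tab beside the existing "Generative" tab. The RWKV recurrent state is carried across turns in a gr.State holding [state, state_pre, tokens]; a deep copy taken before each reply lets the "Alternative" button roll the state back and re-sample the last answer, and a second snapshot lets chat() rewind when the model starts writing the user's lines. A minimal sketch of that snapshot pattern, with a toy stand-in for model.forward (all names below are illustrative, not this Space's code):

import copy

def toy_forward(tokens, state):
    # stand-in for RWKV's model.forward(tokens, state) -> (logits, new_state)
    state = (state or []) + list(tokens)
    return len(state), state

def chat_turn(history, message_tokens, reply_token=42):
    state, _, all_tokens = history
    state_pre = copy.deepcopy(state)        # snapshot before generating the reply
    _, state = toy_forward(message_tokens + [reply_token], state)
    return [state, state_pre, all_tokens + [reply_token]]

def alternative(history):
    # simplified version of the "Alternative" button: forget the last reply
    # and restore the pre-reply snapshot so the same turn can be re-sampled
    _, state_pre, all_tokens = history
    return [copy.deepcopy(state_pre), state_pre, all_tokens[:-1]]

history = [None, None, []]                  # [state, state_pre, tokens]
history = chat_turn(history, [1, 2, 3])     # state advanced, reply recorded
history = alternative(history)              # back to the pre-reply state

The deep copies matter: the state is a mutable object, so holding a plain reference would not survive the next forward pass.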
app.py CHANGED

@@ -1,5 +1,5 @@
 import gradio as gr
-import os, gc, torch
+import os, copy, gc, torch
 from datetime import datetime
 from huggingface_hub import hf_hub_download
 from pynvml import *
@@ -7,19 +7,16 @@ nvmlInit()
 gpu_h = nvmlDeviceGetHandleByIndex(0)
 ctx_limit = 1024
 title = "RWKV-4-Pile-14B-20230313-ctx8192-test1050"
-desc = f'''Links:
-<a href='https://github.com/BlinkDL/ChatRWKV' target="_blank" style="margin:0 0.5em">ChatRWKV</a>
-<a href='https://github.com/BlinkDL/RWKV-LM' target="_blank" style="margin:0 0.5em">RWKV-LM</a>
-<a href="https://pypi.org/project/rwkv/" target="_blank" style="margin:0 0.5em">RWKV pip package</a>
-<a href="https://huggingface.co/spaces/BlinkDL/Raven-RWKV-7B" target="_blank" style="margin:0 0.5em">Raven 7B (alpaca-style)</a>
+desc = f'''Links:<a href='https://github.com/BlinkDL/ChatRWKV' target="_blank" style="margin:0 0.5em">ChatRWKV</a><a href='https://github.com/BlinkDL/RWKV-LM' target="_blank" style="margin:0 0.5em">RWKV-LM</a><a href="https://pypi.org/project/rwkv/" target="_blank" style="margin:0 0.5em">RWKV pip package</a><a href="https://huggingface.co/spaces/BlinkDL/Raven-RWKV-7B" target="_blank" style="margin:0 0.5em">Raven 7B (alpaca-style)</a>
 '''

 os.environ["RWKV_JIT_ON"] = '1'
-os.environ["RWKV_CUDA_ON"] = '
+os.environ["RWKV_CUDA_ON"] = '0' # if '1' then use CUDA kernel for seq mode (much faster)

 from rwkv.model import RWKV
 model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-pile-14b", filename=f"{title}.pth")
 model = RWKV(model=model_path, strategy='cuda fp16i8 *24 -> cuda fp16')
+
 from rwkv.utils import PIPELINE, PIPELINE_ARGS
 pipeline = PIPELINE(model, "20B_tokenizer.json")
@@ -106,28 +103,206 @@ Arrange the given numbers in ascending order.
 ["Simply put, the theory of relativity states that", 150, 1.0, 0.5, 0.2, 0.2],
 ]

-(21 removed lines here, the body of the old demo definition, did not survive the page rendering; only the closing parenthesis below remains)
-)
+chat_intro = '''The following is a coherent verbose detailed conversation between an AI girl named <|bot|> and <|user|>. One day, they meet at a café.
+Note the following important facts about <|bot|>:
+1. <|bot|> is very intelligent, creative and friendly.
+2. <|bot|> likes to tell <|user|> a lot about herself and her opinions.
+3. <|bot|> usually gives <|user|> kind, helpful and informative advice.
+
+<|user|>: Hello, how are you doing?
+
+<|bot|>: Hi! Thanks, I'm fine. What about you?
+
+<|user|>: I am fine. It's nice to see you. Look, here is a store selling tea and juice. We can go and take a look. Would you like to chat with me for a while?
+
+<|bot|>: Sure. Let's go inside. What would you like to talk about? I'm listening.
+'''
+
+def user(message, chatbot):
+    chatbot = chatbot or []
+    print(f"User: {message}")
+    return "", chatbot + [[message, None]]
+
+def alternative(chatbot, history):
+    if not chatbot or not history:
+        return chatbot, history
+
+    chatbot[-1][1] = None
+    history[0] = copy.deepcopy(history[1])
+
+    return chatbot, history
+
+def chat(
+    prompt,
+    user,
+    bot,
+    chatbot,
+    history,
+    temperature=1.0,
+    top_p=0.8,
+    presence_penalty=0.1,
+    count_penalty=0.1,
+):
+    args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)), top_p=float(top_p),
+                         alpha_frequency=float(count_penalty),
+                         alpha_presence=float(presence_penalty),
+                         token_ban=[], # ban the generation of some tokens
+                         token_stop=[]) # stop generation whenever you see any token here
+
+    if not chatbot:
+        return chatbot, history
+
+    message = chatbot[-1][0]
+    message = message.strip().replace('\r\n', '\n').replace('\n\n', '\n')
+    ctx = f"{user}: {message}\n\n{bot}:"
+
+    # gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
+    # print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
+
+    if not history:
+        prompt = prompt.replace("<|user|>", user.strip())
+        prompt = prompt.replace("<|bot|>", bot.strip())
+        prompt = prompt.strip()
+        prompt = f"\n{prompt}\n\n"
+
+        out, state = model.forward(pipeline.encode(prompt), None)
+        history = [state, None, []] # [state, state_pre, tokens]
+        print("History reloaded.")
+
+    [state, _, all_tokens] = history
+    state_pre_0 = copy.deepcopy(state)
+
+    out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:], state)
+    state_pre_1 = copy.deepcopy(state) # For recovery
+
+    print("Bot: ", end='')
+
+    begin = len(all_tokens)
+    out_last = begin
+    out_str: str = ''
+    occurrence = {}
+    for i in range(300):
+        if i <= 0:
+            nl_bias = -float('inf')
+        elif i <= 30:
+            nl_bias = (i - 30) * 0.1
+        elif i <= 130:
+            nl_bias = 0
+        else:
+            nl_bias = (i - 130) * 0.25
+        out[187] += nl_bias
+        for n in occurrence:
+            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
+        next_tokens = [token]
+        if token == 0:
+            next_tokens = pipeline.encode('\n\n')
+        all_tokens += next_tokens
+
+        if token not in occurrence:
+            occurrence[token] = 1
+        else:
+            occurrence[token] += 1
+
+        out, state = model.forward(next_tokens, state)
+
+        tmp = pipeline.decode(all_tokens[out_last:])
+        if '\ufffd' not in tmp:
+            print(tmp, end='', flush=True)
+            out_last = begin + i + 1
+            out_str += tmp
+
+        chatbot[-1][1] = out_str.strip()
+        history = [state, state_pre_0, all_tokens] # keep the 3-slot [state, state_pre, tokens] shape
+        yield chatbot, history
+
+        out_str = pipeline.decode(all_tokens[begin:])
+        out_str = out_str.replace("\r\n", '\n').replace('\\n', '\n')
+
+        if '\n\n' in out_str:
+            break
+
+        # State recovery
+        if f'{user}:' in out_str or f'{bot}:' in out_str:
+            idx_user = out_str.find(f'{user}:')
+            idx_user = len(out_str) if idx_user == -1 else idx_user
+            idx_bot = out_str.find(f'{bot}:')
+            idx_bot = len(out_str) if idx_bot == -1 else idx_bot
+            idx = min(idx_user, idx_bot)
+
+            if idx < len(out_str):
+                out_str = f" {out_str[:idx].strip()}\n\n"
+                tokens = pipeline.encode(out_str)
+
+                all_tokens = all_tokens[:begin] + tokens
+                out, state = model.forward(tokens, state_pre_1)
+                break
+
+    gc.collect()
+    torch.cuda.empty_cache()
+
+    chatbot[-1][1] = out_str.strip()
+    history = [state, state_pre_0, all_tokens]
+    yield chatbot, history
+
+with gr.Blocks(title=title) as demo:
+    with gr.Tab("Generative"):
+        gr.Markdown(f'''{desc} *** <b>Please try examples first (bottom of page)</b> *** (edit them to your own question).\nDemo limited to ctxlen {ctx_limit}.''', label="Description")
+        with gr.Row():
+            with gr.Column():
+                prompt = gr.Textbox(lines=10, label="Prompt", value="Here's a short cyberpunk sci-fi adventure story. The story's main character is an artificial human created by a company called OpenBot.\n\nThe Story:\n")
+                with gr.Row():
+                    submit = gr.Button("Submit", variant="primary")
+                    clear = gr.Button("Clear", variant="secondary")
+                token_count = gr.Slider(10, 200, label="Max Tokens", step=10, value=150)
+                temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
+                top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
+                presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.2)
+                count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.2)
+            with gr.Column():
+                output = gr.Textbox(label="Generated Output", lines=32)
+        data = gr.Dataset(components=[prompt, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Prompts", headers=["Prompt", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
+        submit.click(infer, [prompt, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
+        clear.click(lambda: None, [], [output])
+        data.click(lambda x: x, [data], [prompt, token_count, temperature, top_p, presence_penalty, count_penalty])
+
+    with gr.Tab("Chat"):
+        gr.Markdown(f'''{desc} *** <b>Default Chat Scenario: You (Bob) and Bot (Alice) meet at a café.</b> ***\nIf you want to change the scenario, separate different speakers' turns with an empty line and make sure there are no empty lines within one speaker's turn. Changes only take effect after clearing.''', label="Description")
+        with gr.Row():
+            with gr.Column():
+                chatbot = gr.Chatbot()
+                state = gr.State()
+                message = gr.Textbox(label="Message")
+                with gr.Row():
+                    send = gr.Button("Send", variant="primary")
+                    alt = gr.Button("Alternative", variant="secondary")
+                    clear = gr.Button("Clear", variant="secondary")
+            with gr.Column():
+                with gr.Row():
+                    user_name = gr.Textbox(lines=1, max_lines=1, label="User Name", value="Bob")
+                    bot_name = gr.Textbox(lines=1, max_lines=1, label="Bot Name", value="Alice")
+                prompt = gr.Textbox(lines=10, max_lines=50, label="Scenario", value=chat_intro)
+                temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
+                top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
+                presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.2)
+                count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.2)
+        chat_inputs = [
+            prompt,
+            user_name,
+            bot_name,
+            chatbot,
+            state,
+            temperature,
+            top_p,
+            presence_penalty,
+            count_penalty
+        ]
+        chat_outputs = [chatbot, state]
+        message.submit(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
+        send.click(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
+        alt.click(alternative, [chatbot, state], [chatbot, state], queue=False).then(chat, chat_inputs, chat_outputs)
+        clear.click(lambda: ([], None, ""), [], [chatbot, state, message], queue=False)

 demo.queue(max_size=10)
 demo.launch(share=False)
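Two decoding details in chat() are easy to miss. Every token generated so far is penalized by subtracting alpha_presence + count * alpha_frequency from its logit, and token 187 (the newline in the 20B tokenizer) receives a scheduled bias: impossible at step 0, discouraged up to step 30, neutral until step 130, then increasingly encouraged so the reply terminates. A compressed, self-contained sketch of the same logic (tiny fake vocabulary, shortened thresholds, greedy sampling; the numbers are illustrative only):

import math

NEWLINE = 3                               # stand-in for token 187 ('\n')
ALPHA_PRESENCE = ALPHA_FREQUENCY = 0.1

def fake_logits():
    # stand-in for model.forward(); real logits are recomputed each step
    return [0.5, 0.4, 0.3, 0.2]

def nl_bias(i, ramp_end=3, free_end=5):   # the PR uses 30 and 130
    if i <= 0:
        return -math.inf                  # a reply may never start with '\n'
    if i <= ramp_end:
        return (i - ramp_end) * 0.1       # early: newline still discouraged
    if i <= free_end:
        return 0.0                        # middle: neutral
    return (i - free_end) * 0.25          # late: growing push to end the reply

occurrence = {}
for i in range(8):
    out = fake_logits()
    out[NEWLINE] += nl_bias(i)
    for n, cnt in occurrence.items():     # penalize every token seen so far
        out[n] -= ALPHA_PRESENCE + cnt * ALPHA_FREQUENCY
    token = max(range(len(out)), key=out.__getitem__)
    occurrence[token] = occurrence.get(token, 0) + 1
print(occurrence)   # penalties rotate the choices; the newline wins late

This is also what the sliders map to: "Presence Penalty" feeds alpha_presence and "Count Penalty" feeds alpha_frequency.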
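Finally, the Chat tab's wiring leans on Gradio's event chaining: the unqueued user/alternative step updates the visible chat immediately, then .then() hands off to the chat generator, whose yields stream into the Chatbot. A minimal sketch of that pattern with a toy echo bot (Gradio 3.x pair-format Chatbot, as this Space uses; not this PR's code):

import gradio as gr

def add_user_message(message, history):
    history = history or []
    return "", history + [[message, None]]   # clear the box, show the turn

def bot_reply(history):
    # a generator: each yield streams an update into the Chatbot
    history[-1][1] = ""
    for ch in f"echo: {history[-1][0]}":
        history[-1][1] += ch
        yield history

with gr.Blocks() as demo:
    chat_box = gr.Chatbot()
    msg = gr.Textbox(label="Message")
    # the fast UI update runs unqueued; the slow generator runs after it
    msg.submit(add_user_message, [msg, chat_box], [msg, chat_box], queue=False).then(bot_reply, chat_box, chat_box)

demo.queue(max_size=10)
demo.launch()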