zetavg committed
Commit • 5929f2a
1 Parent(s): 0c97f91

simulate actual behavior in ui dev mode

Files changed:
- llama_lora/ui/finetune_ui.py  +24 -0
- llama_lora/ui/inference_ui.py +23 -2
llama_lora/ui/finetune_ui.py  CHANGED

@@ -312,7 +312,31 @@ def do_train(
             'completion': d['output']}
            for d in data]
 
+    def get_progress_text(epoch, epochs, last_loss):
+        progress_detail = f"Epoch {math.ceil(epoch)}/{epochs}"
+        if last_loss is not None:
+            progress_detail += f", Loss: {last_loss:.4f}"
+        return f"Training... ({progress_detail})"
+
     if Global.ui_dev_mode:
+        Global.should_stop_training = False
+
+        for i in range(300):
+            if (Global.should_stop_training):
+                return
+            epochs = 3
+            epoch = i / 100
+            last_loss = None
+            if (i > 20):
+                last_loss = 3 + (i - 0) * (0.5 - 3) / (300 - 0)
+
+            progress(
+                (i, 300),
+                desc="(Simulate) " + get_progress_text(epoch, epochs, last_loss)
+            )
+
+            time.sleep(0.1)
+
         message = f"""Currently in UI dev mode, not doing the actual training.
 
 Train options: {json.dumps({
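The fine-tune hunk drives Gradio's progress tracker directly: 300 ticks stand in for 3 epochs, with the loss interpolated linearly from 3.0 toward 0.5 and a short sleep to fake work. As a rough, self-contained sketch of that pattern (assuming a Gradio version that provides gr.Progress and queue-backed progress updates; fake_train and the UI wiring are hypothetical, not part of this commit):

import math
import time

import gradio as gr


def fake_train(progress=gr.Progress()):
    # Same idea as the commit: 300 ticks simulating 3 epochs of training,
    # with a loss value interpolated linearly from 3.0 toward 0.5.
    total = 300
    epochs = 3
    for i in range(total):
        epoch = i / 100
        last_loss = None
        if i > 20:
            last_loss = 3 + i * (0.5 - 3) / total
        detail = f"Epoch {math.ceil(epoch)}/{epochs}"
        if last_loss is not None:
            detail += f", Loss: {last_loss:.4f}"
        # gr.Progress is callable with a (step, total) tuple plus a desc label.
        progress((i, total), desc=f"(Simulate) Training... ({detail})")
        time.sleep(0.1)
    return "Done (simulated)."


with gr.Blocks() as demo:
    btn = gr.Button("Train")
    out = gr.Textbox()
    btn.click(fake_train, outputs=out)

# Progress tracking requires the queue to be enabled.
demo.queue().launch()

Because the handler declares a gr.Progress default argument, Gradio injects the tracker automatically without it being listed in inputs.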
llama_lora/ui/inference_ui.py  CHANGED

@@ -1,5 +1,6 @@
 import gradio as gr
 import time
+import json
 
 import torch
 import transformers
@@ -47,10 +48,30 @@ def do_inference(
         lora_model_name = path_of_available_lora_model
 
     if Global.ui_dev_mode:
-        message = f"
+        message = f"Hi, I’m currently in UI-development mode and do not have access to resources to process your request. However, this behavior is similar to what will actually happen, so you can try and see how it will work!\n\nBase model: {Global.base_model}\nLoRA model: {lora_model_name}\n\nThe following text is your prompt:\n\n{prompt}"
         print(message)
+
+        if stream_output:
+            def word_generator(sentence):
+                lines = message.split('\n')
+                out = ""
+                for line in lines:
+                    words = line.split(' ')
+                    for i in range(len(words)):
+                        if out:
+                            out += ' '
+                        out += words[i]
+                        yield out
+                    out += "\n"
+                    yield out
+
+            for partial_sentence in word_generator(message):
+                yield partial_sentence, json.dumps(list(range(len(partial_sentence.split()))), indent=2)
+                time.sleep(0.05)
+
+            return
         time.sleep(1)
-        yield message,
+        yield message, json.dumps(list(range(len(message.split()))), indent=2)
         return
 
     model = get_base_model()
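The streaming branch above yields progressively longer prefixes of the message, one word at a time, so dev mode mimics real token streaming; the json.dumps of a dummy index list appears to be a placeholder for the raw-output component. One quirk: the committed word_generator(sentence) never uses its sentence parameter and reads message from the enclosing scope instead. A standalone sketch of the same pattern, with a hypothetical name (stream_words) and the parameter actually used:

import time


def stream_words(message):
    # Yield ever-longer prefixes of `message`, one word at a time.
    # Mirrors the commit's behavior, including the detail that a space
    # is inserted before the first word of each line after a newline.
    out = ""
    for line in message.split('\n'):
        for word in line.split(' '):
            if out:
                out += ' '
            out += word
            yield out
        out += "\n"
        yield out


if __name__ == "__main__":
    for partial in stream_words("Hi there.\nThis is simulated streaming."):
        print(partial.replace("\n", "\\n"))
        time.sleep(0.05)

In the Space itself, each yielded prefix is passed straight through as the Gradio output, which is what makes the textbox appear to stream.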