Sebastien De Greef committed
Commit: 815cc64
Parent(s): 5159911

add more system info and detect HF spaces deployment

app.py CHANGED
@@ -10,23 +10,34 @@ import logging
 from io import StringIO
 import time
 import asyncio
-
-
-
-# Configure logging to use the string stream
-logging.basicConfig(stream=log_stream, level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)
-log_contents = log_stream.getvalue()
-print(log_contents)
-logger.debug('This is a debug message')
+import psutil
+import platform
+import os
 
 hf_user = None
-hfApi = HfApi()
 try:
+    hfApi = HfApi()
     hf_user = hfApi.whoami()["name"]
 except Exception as e:
     hf_user = "not logged in"
 
+def get_human_readable_size(size, decimal_places=2):
+    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+        if size < 1024.0:
+            break
+        size /= 1024.0
+    return f"{size:.{decimal_places}f} {unit}"
+
+
+# get cpu stats
+disk_stats = psutil.disk_usage('.')
+print(get_human_readable_size(disk_stats.total))
+cpu_info = platform.processor()
+print(cpu_info)
+os_info = platform.platform()
+print(os_info)
+
+memory = psutil.virtual_memory()
 
 # Dropdown options
 model_options = [
@@ -55,6 +66,18 @@ gpu_stats = torch.cuda.get_device_properties(0)
 start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
 max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
 
+running_on_hf = False
+if os.getenv("SYSTEM", None) == "spaces":
+    running_on_hf = True
+
+system_info = f"""\
+- **System:** {os_info}
+- **CPU:** {cpu_info} **Memory:** {get_human_readable_size(memory.free)} free of {get_human_readable_size(memory.total)}
+- **GPU:** {gpu_stats.name} ({max_memory} GB)
+- **Disk:** {get_human_readable_size(disk_stats.free)} free of {get_human_readable_size(disk_stats.total)}
+- **Hugging Face:** {running_on_hf}
+"""
+
 model=None
 tokenizer = None
 dataset = None
@@ -100,8 +123,6 @@ def load_model(initial_model_name, load_in_4bit, max_sequence_length):
         load_in_4bit = load_in_4bit,
         # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
     )
-    log_contents = log_stream.getvalue()
-    print(log_contents)
    return f"Model {initial_model_name} loaded, using {max_sequence_length} as max sequence length.", gr.update(visible=True, interactive=True), gr.update(interactive=True),gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False)
 
 def load_data(dataset_name, data_template_style, data_template):
@@ -162,17 +183,22 @@ def save_model(model_name, hub_model_name, hub_token, gguf_16bit, gguf_8bit, ggu
     return "Model saved", gr.update(visible=True, interactive=True)
 
 def username(profile: gr.OAuthProfile | None):
-
+    hf_user = profile["name"] if profile else "not logged in"
+    return hf_user
 
 # Create the Gradio interface
 with gr.Blocks(title="Unsloth fine-tuning") as demo:
-
-
-
-
-
-
-
+    if (running_on_hf):
+        gr.LoginButton()
+    # logged_user = gr.Markdown(f"**User:** {hf_user}")
+    #demo.load(username, inputs=None, outputs=logged_user)
+    with gr.Row():
+        with gr.Column(scale=0.5):
+            gr.Image("unsloth.png", width="300px", interactive=False, show_download_button=False, show_label=False, show_share_button=False)
+        with gr.Column(min_width="550px", scale=1):
+            gr.Markdown(system_info)
+        with gr.Column(min_width="250px", scale=0.3):
+            gr.Markdown(f"**Links:**\n\n* [Unsloth Hub](https://huggingface.co/unsloth)\n\n* [Unsloth Docs](http://docs.unsloth.com/)\n\n* [Unsloth GitHub](https://github.com/unslothai/unsloth)")
     with gr.Tab("Base Model Parameters"):
 
         with gr.Row():
@@ -232,12 +258,12 @@ with gr.Blocks(title="Unsloth fine-tuning") as demo:
             log_to_tensorboard = gr.Checkbox(label="Log to Tensorboard", value=True, interactive=True)
 
         with gr.Row():
-
+            # optim = gr.Dropdown(choices=["adamw_8bit", "adamw", "sgd"], label="Optimizer", value="adamw_8bit")
             learning_rate = gr.Number(label="Learning Rate", value=2e-4, interactive=True)
 
-        with gr.Row():
+            # with gr.Row():
             weight_decay = gr.Number(label="Weight Decay", value=0.01, interactive=True)
-            lr_scheduler_type = gr.Dropdown(choices=["linear", "cosine", "constant"], label="LR Scheduler Type", value="linear")
+            # lr_scheduler_type = gr.Dropdown(choices=["linear", "cosine", "constant"], label="LR Scheduler Type", value="linear")
         gr.Markdown("---")
 
         with gr.Row():
@@ -249,9 +275,9 @@ with gr.Blocks(title="Unsloth fine-tuning") as demo:
         train_btn = gr.Button("Train", visible=True)
 
     def train_model(model_name: str, lora_r: int, lora_alpha: int, lora_dropout: float, per_device_train_batch_size: int, warmup_steps: int, max_steps: int,
-                    gradient_accumulation_steps: int, logging_steps: int, log_to_tensorboard: bool,
+                    gradient_accumulation_steps: int, logging_steps: int, log_to_tensorboard: bool, learning_rate, weight_decay, seed: int, output_dir, progress= gr.Progress()):
         global model, tokenizer
-        print(f"$$$ Training model {model_name} with {lora_r} R, {lora_alpha} alpha, {lora_dropout} dropout, {per_device_train_batch_size} per device train batch size, {warmup_steps} warmup steps, {max_steps} max steps, {gradient_accumulation_steps} gradient accumulation steps, {logging_steps} logging steps, {log_to_tensorboard} log to tensorboard, {
+        print(f"$$$ Training model {model_name} with {lora_r} R, {lora_alpha} alpha, {lora_dropout} dropout, {per_device_train_batch_size} per device train batch size, {warmup_steps} warmup steps, {max_steps} max steps, {gradient_accumulation_steps} gradient accumulation steps, {logging_steps} logging steps, {log_to_tensorboard} log to tensorboard, {learning_rate} learning rate, {weight_decay} weight decay, {seed} seed, {output_dir} output dir")
         iseed = seed
         model = FastLanguageModel.get_peft_model(
             model,
@@ -300,7 +326,7 @@ with gr.Blocks(title="Unsloth fine-tuning") as demo:
         return "Model trained 100%",gr.update(visible=True, interactive=False), gr.update(visible=True, interactive=True), gr.update(interactive=True)
 
 
-    train_btn.click(train_model, inputs=[model_name, lora_r, lora_alpha, lora_dropout, per_device_train_batch_size, warmup_steps, max_steps, gradient_accumulation_steps, logging_steps, log_to_tensorboard,
+    train_btn.click(train_model, inputs=[model_name, lora_r, lora_alpha, lora_dropout, per_device_train_batch_size, warmup_steps, max_steps, gradient_accumulation_steps, logging_steps, log_to_tensorboard, learning_rate, weight_decay, seed, output_dir], outputs=[train_output, train_btn])
 
     with gr.Tab("Save & Push Options"):
 
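The Spaces detection added in this commit keys off the `SYSTEM` environment variable, which Hugging Face sets to `spaces` inside Space containers. A minimal standalone sketch of that check, under the assumption of the standard Spaces environment (the `SPACE_ID` fallback is my addition for illustration, not part of this commit):

```python
import os

def running_on_hf_spaces() -> bool:
    # Hugging Face Spaces containers export SYSTEM=spaces; SPACE_ID is
    # another variable Spaces sets, used here only as a hedged fallback.
    return os.getenv("SYSTEM") == "spaces" or os.getenv("SPACE_ID") is not None

print(running_on_hf_spaces())  # False on a local machine, True inside a Space
```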
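For reference, the `get_human_readable_size` helper introduced here keeps dividing by 1024 until the value fits the current unit, then formats it. A quick sanity check of its behaviour (expected outputs worked out by hand):

```python
def get_human_readable_size(size, decimal_places=2):
    # Same logic as the helper added in this commit: walk up the units,
    # dividing by 1024 until the value drops below 1024, then format.
    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
        if size < 1024.0:
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"

print(get_human_readable_size(512))        # 512.00 B
print(get_human_readable_size(123456789))  # 117.74 MB
print(get_human_readable_size(2 ** 40))    # 1.00 TB
```

One caveat for the system-info banner: on Linux, `psutil.virtual_memory().free` excludes memory the kernel could reclaim from caches, so the `available` field usually gives a more realistic "free" figure than `free`.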