multimodalart (HF staff) committed

Commit a441cea
Parent: 395ee78

Update app.py

Files changed (1): app.py (+21 -8)
app.py CHANGED
@@ -1,12 +1,12 @@
 import os
 import subprocess
 from typing import Union
+from huggingface_hub import whoami
 is_spaces = True if os.environ.get("SPACE_ID") else False

 if is_spaces:
     subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
     import spaces
-    from huggingface_hub import whoami

 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 import sys
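This hunk moves `from huggingface_hub import whoami` up to the top of the module so it is imported unconditionally; the later hunks call `whoami()` when the app runs outside of Spaces. For reference, a minimal sketch (not part of this commit) of checking the local login state that `start_training` now depends on; it only assumes that `huggingface_hub.whoami()` raises when no usable token is available:

```python
# Minimal sketch (not part of the commit): check the local Hugging Face login
# that start_training() now relies on. Assumes only that
# huggingface_hub.whoami() raises when no usable token is available.
from huggingface_hub import whoami


def hf_username_or_none():
    """Return the logged-in username, or None when no valid token is found."""
    try:
        return whoami()["name"]
    except Exception:
        return None


if __name__ == "__main__":
    user = hf_username_or_none()
    print(f"Logged in as {user}" if user else "Not logged in; run `huggingface-cli login` first.")
```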
@@ -37,6 +37,7 @@ if not is_spaces:

 MAX_IMAGES = 150

+
 def load_captioning(uploaded_images, concept_sentence):
     gr.Info("Images uploaded!")
     updates = []
@@ -149,6 +150,16 @@ def start_training(
 ):
     if not lora_name:
         raise gr.Error("You forgot to insert your LoRA name! This name has to be unique.")
+
+    if not is_spaces:
+        try:
+            if whoami()["auth"]["accessToken"]["role"] == "write" or "repo.write" in whoami()["auth"]["accessToken"]["fineGrained"]["scoped"][0]["permissions"]:
+                gr.Info(f"Starting training locally {whoami()['name']}. Your LoRA will be available locally and in Hugging Face after it finishes.")
+            else:
+                raise gr.Error(f"You logged in to Hugging Face with not enough permissions, you need a token that allows writing to {whoami()['name']} profile.")
+        except:
+            raise gr.Error(f"You logged in to Hugging Face with not enough permissions, you need a token that allows writing to {whoami()['name']} profile.")
+
     print("Started training")
     slugged_lora_name = slugify(lora_name)

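The new block validates the token before local training: it accepts either a classic token with the `write` role or a fine-grained token whose first scope carries `repo.write`, and it calls `whoami()` several times per check. A possible refactor, sketched below and not what the commit ships, caches a single `whoami()` response and reads only the fields this hunk already uses (`auth.accessToken.role` and the scoped `permissions` lists):

```python
# Sketch of the same permission check with one cached whoami() call
# (a possible refactor, not what the commit ships). Assumes only the response
# fields the hunk already reads: auth.accessToken.role and the
# fineGrained.scoped[*].permissions lists.
from huggingface_hub import whoami
import gradio as gr


def assert_write_access() -> str:
    """Return the username if the current token can push repos, else raise gr.Error."""
    try:
        info = whoami()
        token = info["auth"]["accessToken"]
        scoped = token.get("fineGrained", {}).get("scoped", [])
        has_scoped_write = any("repo.write" in s.get("permissions", []) for s in scoped)
        can_write = token.get("role") == "write" or has_scoped_write
        name = info["name"]
    except Exception:
        raise gr.Error("Could not read your Hugging Face token. Are you logged in?")
    if not can_write:
        raise gr.Error(f"Your token cannot write to the {name} profile; create a token with write access.")
    gr.Info(f"Starting training locally as {name}.")
    return name
```

Caching the response also keeps the error path from calling `whoami()` again inside the f-string, which, when the original call is the thing that failed, raises before the intended message can be shown.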
@@ -166,7 +177,11 @@
     config["config"]["process"][0]["network"]["linear_alpha"] = int(rank)
     config["config"]["process"][0]["datasets"][0]["folder_path"] = dataset_folder
     config["config"]["process"][0]["save"]["push_to_hub"] = True
-    config["config"]["process"][0]["save"]["hf_repo_id"] = f"{profile.username}/{slugged_lora_name}"
+    try:
+        username = whoami()["name"] if not is_spaces else profile.username
+    except:
+        raise gr.Error("Error trying to retrieve your username. Are you sure you are logged in with Hugging Face?")
+    config["config"]["process"][0]["save"]["hf_repo_id"] = f"{username}/{slugged_lora_name}"
     config["config"]["process"][0]["save"]["hf_private"] = True
     if concept_sentence:
         config["config"]["process"][0]["trigger_word"] = concept_sentence
@@ -189,7 +204,7 @@ def start_training(
     with open(config_path, "w") as f:
         yaml.dump(config, f)
     if is_spaces:
-        print("Started training with spacerunner...")
+        gr.Info("Instantiating Spacerunner...")
         # copy config to dataset_folder as config.yaml
         shutil.copy(config_path, dataset_folder + "/config.yaml")
         # get location of this script
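A one-line change, but worth noting: `gr.Info` surfaces the message as a toast in the browser, while `print` only reaches the console or Space logs. A hypothetical minimal app (not from the repo) showing the difference:

```python
# Hypothetical minimal app (not from the repo): gr.Info reaches the user in
# the browser, print() only reaches the console/Space logs.
import gradio as gr


def notify():
    gr.Info("Instantiating Spacerunner...")   # toast shown in the UI
    print("Instantiating Spacerunner...")     # visible only in the logs
    return "started"


with gr.Blocks() as demo:
    status = gr.Textbox(label="status")
    gr.Button("Start").click(notify, outputs=status)

# demo.launch()
```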
@@ -343,16 +358,14 @@ with gr.Blocks(theme=theme, css=css) as demo:
         progress_area = gr.Markdown("")

     with gr.Tab("Train on your device" if is_spaces else "Instructions"):
-        gr.Markdown(
-            f"""To use FLUX LoRA Ease locally with this UI, you can clone this repository (yes, HF Spaces are git repos!). You'll need ~23GB of VRAM
+        gr.Markdown(f"""To use FLUX LoRA Ease locally with this UI, you can clone this repository (yes, HF Spaces are git repos!). You'll need ~23GB of VRAM
 ```bash
-git clone https://huggingface.co/spaces/autotrain-projects/flux-lora-ease
+git clone https://huggingface.co/spaces/flux-lora-ease/flux-lora-ease
 cd flux-lora-ease
-## Optional, start a venv environment ##
+## Optional, start a venv environment (install torch first) ##
 python3 -m venv venv
 source venv/bin/activate
 # .\venv\Scripts\activate on windows
-# install torch first
 ## End of optional ##
 pip install -r requirements_local.txt
 ```
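The instructions hunk collapses the `gr.Markdown(` call onto one line, updates the `git clone` URL to the flux-lora-ease org, and folds the "install torch first" reminder into the optional venv comment. Since the text calls out "~23GB of VRAM", here is a small pre-flight check one could run before training locally, a sketch under the assumption that torch is already installed, as the venv note suggests:

```python
# Sketch (not part of the repo): pre-flight check for the "~23GB of VRAM"
# requirement mentioned in the local-use instructions. Assumes torch is installed.
import torch


def has_enough_vram(min_gb: float = 23.0) -> bool:
    if not torch.cuda.is_available():
        return False
    total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
    return total_gb >= min_gb


if __name__ == "__main__":
    print("VRAM OK" if has_enough_vram() else "Less than ~23GB of VRAM; FLUX LoRA training will likely not fit.")
```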
 