Spaces: ginipick (running on Zero)

ginipick committed · Commit 80cec7b (verified) · 1 parent: 7778e32

Update app.py

Files changed (1):
  app.py  +29 -4
app.py CHANGED

@@ -12,6 +12,9 @@ import torch
 from diffusers import DiffusionPipeline
 from transformers import pipeline
 import gradio as gr
+import os
+from huggingface_hub import login
+
 
 # Download checkpoints
 snapshot_download(repo_id="franciszzj/Leffa", local_dir="./ckpts")
@@ -38,6 +41,27 @@ pt_model = LeffaModel(
 )
 pt_inference = LeffaInference(model=pt_model)
 
+HF_TOKEN = os.getenv("HF_TOKEN")
+if HF_TOKEN is None:
+    raise ValueError("Please set the HF_TOKEN environment variable")
+login(token=HF_TOKEN)
+
+# FLUX model initialization (modified)
+fashion_pipe = DiffusionPipeline.from_pretrained(
+    base_model,
+    torch_dtype=torch.bfloat16,
+    use_auth_token=HF_TOKEN  # auth token added
+)
+fashion_pipe.to("cuda")
+
+# LoRA loading function (modified)
+def load_lora(pipe, repo_id):
+    pipe.load_lora_weights(
+        repo_id,
+        use_auth_token=HF_TOKEN
+    )
+    return pipe
+
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 base_model = "black-forest-labs/FLUX.1-dev"
 model_lora_repo = "Motas/Flux_Fashion_Photography_Style"
@@ -58,12 +82,12 @@ def generate_fashion(prompt, mode, cfg_scale, steps, randomize_seed, seed, width
     else:
         actual_prompt = prompt
 
-    # LoRA and trigger-word setup per mode
+    # LoRA loading and trigger-word setup per mode
     if mode == "Generate Model":
-        pipe.load_lora_weights(model_lora_repo)
+        fashion_pipe = load_lora(fashion_pipe, model_lora_repo)
         trigger_word = "fashion photography, professional model"
     else:
-        pipe.load_lora_weights(clothes_lora_repo)
+        fashion_pipe = load_lora(fashion_pipe, clothes_lora_repo)
         trigger_word = "upper clothing, fashion item"
 
     if randomize_seed:
@@ -76,13 +100,14 @@ def generate_fashion(prompt, mode, cfg_scale, steps, randomize_seed, seed, width
         if i % (steps // 10) == 0:
             progress(i / steps * 100, f"Processing step {i} of {steps}...")
 
-    image = pipe(
+    image = fashion_pipe(
         prompt=f"{actual_prompt} {trigger_word}",
         num_inference_steps=steps,
         guidance_scale=cfg_scale,
         width=width,
         height=height,
         generator=generator,
+        use_auth_token=HF_TOKEN,  # auth token added
         joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
 
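For context, here is a minimal sketch (not part of the commit) of how the new pieces — the HF_TOKEN check, the authenticated FLUX pipeline, and the load_lora helper — could fit together. It assumes a recent diffusers/huggingface_hub release where from_pretrained and load_lora_weights accept token= (the replacement for the deprecated use_auth_token=), defines base_model before the pipeline is constructed, and leaves the auth argument out of the generation call itself, which typically does not accept one; run_example and its sample settings are illustrative only.

# Sketch only: token-authenticated FLUX + LoRA flow under the assumptions above.
import os

import torch
from diffusers import DiffusionPipeline
from huggingface_hub import login

HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None:
    raise ValueError("Please set the HF_TOKEN environment variable")
login(token=HF_TOKEN)  # authenticate this process with the Hub

base_model = "black-forest-labs/FLUX.1-dev"               # defined before first use
model_lora_repo = "Motas/Flux_Fashion_Photography_Style"

fashion_pipe = DiffusionPipeline.from_pretrained(
    base_model,
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN,  # auth for the gated FLUX.1-dev repo (assumed `token=` API)
)
fashion_pipe.to("cuda")


def load_lora(pipe, repo_id):
    # Fetch LoRA weights with the same token as the base model.
    pipe.load_lora_weights(repo_id, token=HF_TOKEN)
    return pipe


def run_example():
    # Hypothetical driver, for illustration only.
    pipe = load_lora(fashion_pipe, model_lora_repo)
    generator = torch.Generator(device="cuda").manual_seed(42)
    image = pipe(
        prompt="studio portrait, fashion photography, professional model",
        num_inference_steps=28,
        guidance_scale=3.5,
        width=768,
        height=1024,
        generator=generator,
        joint_attention_kwargs={"scale": 0.85},  # LoRA strength
    ).images[0]
    image.save("fashion_example.png")

In this arrangement the login and model download happen once at startup, so the per-request path only loads LoRA weights and runs generation.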