ssboost committed on
Commit
c59923b
•
1 Parent(s): 3f3f319

Update app.py

Files changed (1)
  1. app.py +0 -41
app.py CHANGED
@@ -13,7 +13,6 @@ import numpy as np
 from huggingface_hub import InferenceClient
 import os
 
-
 device = "cuda"
 ckpt_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors")
 ckpt_IPA_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors-IP-Adapter-Plus")
@@ -56,24 +55,6 @@ pipe_i2i.load_ip_adapter(f'{ckpt_IPA_dir}' , subfolder="", weight_name=["ip_adap
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-def call_api(content, system_message, max_tokens=1000, temperature=0.7, top_p=0.95):
-    messages = [{"role": "system", "content": system_message}, {"role": "user", "content": content}]
-    response = client.chat_completion(messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
-    return response.choices[0].message['content']
-
-def generate_prompt(korean_prompt):
-    system_message = """
-    Given the following description in Korean,
-    translate and generate a concise English prompt suitable for a Stable Diffusion model.
-    The prompt should be focused, descriptive,
-    and contain specific keywords or phrases that will help guide the image generation process.
-    Use simple and descriptive language, avoiding unnecessary words.
-    Ensure the output is in English and follows the format typically used in Stable Diffusion prompts.
-    The description is: [Insert Korean description here]
-    """
-    optimized_prompt = call_api(korean_prompt, system_message)
-    return optimized_prompt  # return the optimized prompt
-
 @spaces.GPU
 def infer(prompt,
           ip_adapter_image = None,
@@ -136,21 +117,6 @@ css="""
 with gr.Blocks(css=css) as Kolors:
     with gr.Row():
         with gr.Column(elem_id="col-left"):
-            with gr.Row():
-                korean_prompt = gr.Textbox(
-                    label="한국어 프롬프트 입력",
-                    placeholder="한국어로 원하는 프롬프트를 입력하세요",
-                    lines=2
-                )
-            with gr.Row():
-                generate_prompt_button = gr.Button("Generate Prompt")
-            with gr.Row():
-                optimized_prompt = gr.Textbox(
-                    label="최적화된 프롬프트 생성",
-                    placeholder=" ",
-                    lines=2,
-                    interactive=False
-                )
             with gr.Row():
                 generated_prompt = gr.Textbox(
                     label="프롬프트 입력",
@@ -219,13 +185,6 @@ with gr.Blocks(css=css) as Kolors:
             result = gr.Image(label="Result", show_label=False)
             download_button = gr.File(label="Download Image")
 
-            # Generate the optimized prompt and display the result
-            generate_prompt_button.click(
-                fn=generate_prompt,
-                inputs=[korean_prompt],
-                outputs=[optimized_prompt]
-            )
-
             # Set up image generation and the download file path
             run_button.click(
                 fn=infer,
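
Note: the removed call_api / generate_prompt helpers relied on a client object (a huggingface_hub InferenceClient) that is created elsewhere in app.py and is not visible in this diff. A minimal standalone sketch of the same pattern, assuming a placeholder chat model ID and an HF_TOKEN environment variable (neither is taken from this commit):

import os
from huggingface_hub import InferenceClient

# Placeholder model ID and token; the Space's actual client setup is not part of this diff.
client = InferenceClient(model="<chat-model-id>", token=os.environ.get("HF_TOKEN"))

def generate_prompt(korean_prompt):
    # System message condensed from the removed code: translate a Korean description
    # into a concise English prompt for a Stable Diffusion model.
    system_message = (
        "Given the following description in Korean, translate and generate a concise "
        "English prompt suitable for a Stable Diffusion model. Use simple, descriptive "
        "language and specific keywords."
    )
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": korean_prompt},
    ]
    response = client.chat_completion(messages, max_tokens=1000, temperature=0.7, top_p=0.95)
    return response.choices[0].message.content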
 