artificialguybr committed • Commit 66bd20d • Parent(s): 81c2d2d

Update app.py
app.py CHANGED
@@ -16,11 +16,11 @@ def query(payload, api_url, token):
     return io.BytesIO(response.content)

 # Define the function to run when the button is clicked
-def run_lora(prompt, weight):
-    selected_lora = loras[0]
+def run_lora(prompt):
+    selected_lora = loras[0]  # You can change this to select a different LoRA if needed
     api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
     trigger_word = selected_lora["trigger_word"]
-    token = os.getenv("API_TOKEN")
+    token = os.getenv("API_TOKEN")  # Make sure you have set your API token
     payload = {"inputs": f"{prompt} {trigger_word}"}
     image_bytes = query(payload, api_url, token)
     return Image.open(image_bytes)
@@ -29,32 +29,24 @@ def run_lora(prompt, weight):
 print("Before Gradio Interface")

 with gr.Blocks() as app:
-    title = gr.HTML("<h1>LoRA the Explorer</h1>")
-    gallery = gr.Gallery(
-        [(item["image"], item["title"]) for item in loras],
-        label="LoRA Gallery",
-        allow_preview=False,
-        columns=3,
-    )
-    prompt = gr.Textbox(label="Prompt", lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
-    advanced_options = gr.Accordion("Advanced options", open=False)
-    weight = gr.Slider(0, 10, value=1, step=0.1, label="LoRA weight")
-    result = gr.Image(interactive=False, label="Generated Image")
-
     with gr.Row():
         with gr.Column():
-            title
-            gallery
-
-
-
+            title = gr.HTML("<h1>LoRA the Explorer</h1>")
+            gallery = gr.Gallery(
+                [(item["image"], item["title"]) for item in loras],
+                label="LoRA Gallery",
+                allow_preview=False,
+                columns=1,  # Change to 1 column to make images smaller
+                item_size=(150, 150)  # Set the size of the gallery images
+            )
+        with gr.Column():
+            prompt = gr.Textbox(label="Prompt", lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
     gr.Button("Run").click(
         fn=run_lora,
-        inputs=[prompt, weight],
+        inputs=[prompt],
         outputs=[result]
     )
-
-    result
+    result = gr.Image(interactive=False, label="Generated Image")

 print("After Gradio Interface")
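Note: the patch only shows the final line of the query() helper and refers to a loras list defined earlier in app.py. For orientation, a minimal sketch of what those pieces presumably look like; the placeholder field values, the Bearer-token header, and the requests call are assumptions based on the usual Hugging Face Inference API pattern, not part of this commit.

import io

import requests

# Assumed shape of the `loras` list; only the keys the diff actually uses
# ("image", "title", "repo", "trigger_word") come from the commit, the
# values below are placeholders.
loras = [
    {
        "image": "thumbs/example.png",      # gallery thumbnail (placeholder)
        "title": "Example LoRA",            # gallery caption (placeholder)
        "repo": "user/example-lora",        # Hub repo id used to build api_url (placeholder)
        "trigger_word": "example style",    # appended to the prompt (placeholder)
    },
]

def query(payload, api_url, token):
    # Plausible body for the helper whose final line appears in the diff:
    # POST the prompt payload to the Inference API with a bearer token and
    # return the raw image bytes wrapped so PIL.Image.open can read them.
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    return io.BytesIO(response.content)

Under these assumptions, the patched run_lora(prompt) builds the payload from the prompt plus the trigger word, posts it to the model's inference endpoint, and opens the returned bytes with PIL; wrapping the response in io.BytesIO is what lets Image.open treat the raw bytes as a file.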