Fabrice-TIERCELIN committed
Commit a9d7ba6 · 1 Parent(s): 8f3e82e
gradio_demo.py  +1 -20
gradio_demo.py
CHANGED
@@ -519,7 +519,7 @@ with gr.Blocks(title="SUPIR") as interface:
 prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; I advise you to write in English because other languages may not be handled", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
 prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
 upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
-allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8], ["9 min", 9], ["10 min", 10]
+allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
 output_format = gr.Radio([["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="png", interactive=True)

 with gr.Accordion("Pre-denoising (optional)", open=False):
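Aside, not part of this commit: the `allocation` radio edited above exposes a GPU time budget in minutes. On ZeroGPU Spaces such a value is typically converted into a duration for the `@spaces.GPU` decorator; the sketch below assumes that pattern, and `restore` together with its arguments is a hypothetical stand-in rather than SUPIR code.

# Sketch under assumptions: turning a minutes value such as `allocation`
# into a ZeroGPU duration. `restore` and its arguments are illustrative.
import spaces

def get_duration(image, allocation):
    # `duration` accepts a callable that receives the same arguments as the
    # decorated function and returns the requested time in seconds.
    return allocation * 60

@spaces.GPU(duration=get_duration)
def restore(image, allocation):
    # GPU-bound work would run here within the requested allocation.
    return image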
@@ -728,17 +728,6 @@ with gr.Blocks(title="SUPIR") as interface:
 denoise_information
 ])

-llave_button.click(fn = check, inputs = [
-denoise_image
-], outputs = [], queue = False, show_progress = False).success(fn = llave_process, inputs = [
-denoise_image,
-temperature,
-top_p,
-qs
-], outputs = [
-prompt
-])
-
 diffusion_button.click(fn = update_seed, inputs = [
 randomize_seed,
 seed
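Aside, not part of this commit: the removed `llave_button` block and the surviving `diffusion_button` wiring both use Gradio event chaining, where the handler registered with `.success(...)` runs only if the `.click(...)` handler finished without raising. A minimal self-contained sketch of that pattern, with hypothetical `check` and `process` handlers:

# Sketch: Gradio .click().success() chaining. Raising gr.Error in the first
# handler stops the chain; component and function names are illustrative.
import gradio as gr

def check(text):
    if not text:
        raise gr.Error("Please provide an input first")

def process(text):
    return text.upper()

with gr.Blocks() as demo:
    box = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    run = gr.Button("Run")
    # `process` runs only if `check` completed without raising.
    run.click(fn=check, inputs=[box], outputs=[], queue=False).success(
        fn=process, inputs=[box], outputs=[out]
    )

demo.launch()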
@@ -805,13 +794,5 @@ with gr.Blocks(title="SUPIR") as interface:
 spt_linear_s_stage2,
 model_select
 ])
-
-submit_button.click(fn = submit_feedback, inputs = [
-event_id,
-fb_score,
-fb_text
-], outputs = [
-fb_text
-])

 interface.queue(10).launch()
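Aside, not part of this commit: the radios in this file give each choice as a [label, value] pair, so handlers receive the numeric value (2 for "x2", 6 for "6 min") rather than the display string, and `interface.queue(10).launch()` enables request queueing before launch. The self-contained sketch below assumes that behaviour and uses the keyword form `queue(max_size=10)`, since the meaning of the positional argument depends on the Gradio version; `describe` is a hypothetical handler.

# Sketch under assumptions: gr.Radio choices given as (label, value) pairs
# pass the value, not the label, to event handlers.
import gradio as gr

def describe(upscale, allocation):
    return f"Upscale x{upscale}, up to {allocation} min of GPU time"

with gr.Blocks(title="Radio value demo") as demo:
    upscale = gr.Radio([("x1", 1), ("x2", 2), ("x4", 4)], label="Upscale factor", value=2)
    allocation = gr.Radio([("1 min", 1), ("6 min", 6), ("8 min", 8)], label="GPU allocation time", value=6)
    summary = gr.Textbox(label="Summary")
    show = gr.Button("Show")
    show.click(fn=describe, inputs=[upscale, allocation], outputs=[summary])

demo.queue(max_size=10).launch()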