Fabrice-TIERCELIN committed · Commit d4f0399 · Parent: 1448721
10 min allocation
app.py CHANGED
@@ -137,9 +137,9 @@ def stage2_process(
     output_format,
     allocation
 ):
-    print("noisy_image
+    print("noisy_image:\n" + str(noisy_image))
+    print("denoise_image:\n" + str(denoise_image))
     print("rotation: " + str(rotation))
-    print("denoise_image: " + str(denoise_image))
     print("prompt: " + str(prompt))
     print("a_prompt: " + str(a_prompt))
     print("n_prompt: " + str(n_prompt))
@@ -315,7 +315,7 @@ def restore(
     allocation
 ):
     start = time.time()
-    print('
+    print('restore ==>>')

     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
@@ -360,7 +360,7 @@ def restore(
     # All the results have the same size
     result_height, result_width, result_channel = np.array(results[0]).shape

-    print('<<==
+    print('<<== restore')
     end = time.time()
     secondes = int(end - start)
     minutes = math.floor(secondes / 60)
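Note: the `restore ==>>` / `<<== restore` pair brackets the timed section, and the hunk above turns the elapsed seconds into minutes with `math.floor`. A minimal standalone sketch of the same entry/exit-marker timing pattern (the `work` function and the sleep are illustrative, not from app.py):

import math
import time

def work():
    # Placeholder for the timed body (illustrative only).
    time.sleep(1.5)

start = time.time()
print('restore ==>>')   # entry marker, as in the diff
work()
print('<<== restore')   # exit marker, as in the diff
end = time.time()

# Same arithmetic as the hunk above: whole seconds, then minutes + remainder.
secondes = int(end - start)
minutes = math.floor(secondes / 60)
print(f"Elapsed: {minutes} min {secondes - minutes * 60} s")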
@@ -474,8 +474,8 @@ with gr.Blocks() as interface:
     prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
     prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
     upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
-    allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
-    output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="
+    allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8], ["9 min (discouraged)", 9], ["10 min (discouraged)", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
+    output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="input", interactive=True)

     with gr.Accordion("Pre-denoising (optional)", open=False):
         gamma_correction = gr.Slider(label="Gamma Correction", info = "lower=lighter, higher=darker", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
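Note: both changed widgets use Gradio's pair form of `choices`, where each entry is `[display label, returned value]`, so the new "10 min (discouraged)" option hands the integer 10 to the event handler while the label carries the warning. A minimal sketch of that label-to-value mapping (the `demo` and `show` names are illustrative, not from app.py):

import gradio as gr

def show(allocation):
    # `allocation` arrives as the second element of the chosen pair: an int.
    return f"GPU allocation: {allocation} min"

with gr.Blocks() as demo:
    allocation = gr.Radio(
        [["8 min", 8], ["9 min (discouraged)", 9], ["10 min (discouraged)", 10]],
        label="GPU allocation time",
        value=8,  # the default selects by value, not by label
    )
    out = gr.Textbox()
    allocation.change(show, inputs=allocation, outputs=out)

demo.launch()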
@@ -612,7 +612,7 @@ with gr.Blocks() as interface:
     False,
     0.,
     "v0-Q",
-    "
+    "input",
     5
 ],
 [
@@ -643,7 +643,7 @@ with gr.Blocks() as interface:
     False,
     0.,
     "v0-Q",
-    "
+    "input",
     4
 ],
 [
@@ -674,7 +674,7 @@ with gr.Blocks() as interface:
     False,
     0.,
     "v0-Q",
-    "
+    "input",
     4
 ],
 [
@@ -705,7 +705,7 @@ with gr.Blocks() as interface:
     False,
     0.,
     "v0-Q",
-    "
+    "input",
     4
 ],
 ],
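Note: the commit title "10 min allocation" suggests the selected minutes ultimately set how long a ZeroGPU worker is reserved. The actual wiring in app.py is not shown in these hunks, so the following is only a hedged sketch of how a minute count is commonly passed to the `spaces` package on Hugging Face; the `process` function and the fixed 8-minute value are illustrative assumptions.

import spaces  # Hugging Face ZeroGPU helper, available inside Spaces

# `duration` is expressed in seconds, so minutes from the Radio scale by 60.
@spaces.GPU(duration=8 * 60)  # assumption: a fixed reservation for the sketch
def process(noisy_image, allocation):
    # GPU-bound work would run here, for at most the reserved duration.
    ...

Newer versions of `spaces` also accept a callable for `duration`, which would let the radio's per-run value size the reservation; whether this Space uses that mechanism is not visible in the diff above.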