Spaces: Running on Zero

tianleliphoebe committed · 513a020
Parent(s): 07e4294

fix regenerate bug

Files changed:
- serve/constants.py +12 -0
- serve/vote_utils.py +36 -20
serve/constants.py
CHANGED
@@ -17,4 +17,16 @@ SAVE_IMAGE = "save_image"
 SAVE_VIDEO = "save_video"
 SAVE_LOG = "save_log"
 
+IMAGE_GENERATION_MODELS = ['fal_LCM(v1.5/XL)_text2image', 'fal_SDXLTurbo_text2image', 'fal_SDXL_text2image', 'imagenhub_PixArtAlpha_generation', 'fal_PixArtSigma_text2image',
+                           'imagenhub_OpenJourney_generation', 'fal_SDXLLightning_text2image', 'fal_StableCascade_text2image',
+                           'playground_PlayGroundV2_generation', 'playground_PlayGroundV2.5_generation']
+IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
+                        'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition',
+                        'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition',
+                        'imagenhub_InfEdit_edition', 'imagenhub_CosXLEdit_edition']
+VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
+                           'fal_AnimateDiffTurbo_text2video',
+                           'videogenhub_LaVie_generation', 'videogenhub_VideoCrafter2_generation',
+                           'videogenhub_ModelScope_generation', 'videogenhub_OpenSora_generation']
+
 
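Note (an illustration, not part of the commit): each key follows a '<backend>_<ModelName>_<task>' pattern, and the arena UI shows only the middle segment via model_name.split('_')[1], as in serve/vote_utils.py below. A hypothetical standalone snippet:

full_key = 'fal_SDXL_text2image'        # one entry from IMAGE_GENERATION_MODELS
display_name = full_key.split('_')[1]   # 'SDXL' -- the short name rendered as "### Model A: SDXL"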
serve/vote_utils.py
CHANGED
@@ -7,7 +7,7 @@ import regex as re
 from pathlib import Path
 from .utils import *
 from .log_utils import build_logger
-from .constants import IMAGE_DIR, VIDEO_DIR
+from .constants import IMAGE_DIR, VIDEO_DIR, IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS
 import imageio
 
 ig_logger = build_logger("gradio_web_server_image_generation", "gr_web_image_generation.log") # ig = image generation, loggers for single model direct chat
@@ -177,7 +177,8 @@ def leftvote_last_response_igm(
             gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
             gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-        return ("",) + (disable_btn,) * 4 + (gr.Markdown(
+        return ("",) + (disable_btn,) * 4 + (gr.Markdown(state0.model_name, visible=True),
+                                             gr.Markdown(state1.model_name, visible=True))
 
 def rightvote_last_response_igm(
     state0, state1, model_selector0, model_selector1, request: gr.Request
@@ -190,8 +191,8 @@ def rightvote_last_response_igm(
     if model_selector0 == "":
         return ("",) + (disable_btn,) * 4 + (gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True), gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-
-
+        return ("",) + (disable_btn,) * 4 + (gr.Markdown(state0.model_name, visible=True),
+                                             gr.Markdown(state1.model_name, visible=True))
 
 
 def tievote_last_response_igm(
@@ -206,7 +207,8 @@ def tievote_last_response_igm(
             gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
             gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-        return ("",) + (disable_btn,) * 4 + (gr.Markdown(
+        return ("",) + (disable_btn,) * 4 + (gr.Markdown(state0.model_name, visible=True),
+                                             gr.Markdown(state1.model_name, visible=True))
 
 
 def bothbad_vote_last_response_igm(
@@ -221,7 +223,8 @@ def bothbad_vote_last_response_igm(
             gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
            gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-        return ("",) + (disable_btn,) * 4 + (gr.Markdown(
+        return ("",) + (disable_btn,) * 4 + (gr.Markdown(state0.model_name, visible=True),
+                                             gr.Markdown(state1.model_name, visible=True))
 
 ## Image Editing (IE) Single Model Direct Chat
 
@@ -259,7 +262,7 @@ def leftvote_last_response_iem(
     if model_selector0 == "":
         names = (gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True), gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-        names = (gr.Markdown(
+        names = (gr.Markdown(state0.model_name, visible=False), gr.Markdown(state1.model_name, visible=False))
     return names + ("", "", gr.Image(height=512, width=512, type="pil"), "") + (disable_btn,) * 4
 
 def rightvote_last_response_iem(
@@ -277,7 +280,7 @@ def rightvote_last_response_iem(
         names = (gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
                  gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-        names = (gr.Markdown(
+        names = (gr.Markdown(state0.model_name, visible=False), gr.Markdown(state1.model_name, visible=False))
     return names + ("", "", gr.Image(height=512, width=512, type="pil"), "") + (disable_btn,) * 4
 
 def tievote_last_response_iem(
@@ -291,7 +294,7 @@ def tievote_last_response_iem(
         names = (gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
                  gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-        names = (gr.Markdown(
+        names = (gr.Markdown(state0.model_name, visible=False), gr.Markdown(state1.model_name, visible=False))
     return names + ("", "", gr.Image(height=512, width=512, type="pil"), "") + (disable_btn,) * 4
 
 def bothbad_vote_last_response_iem(
@@ -305,7 +308,7 @@ def bothbad_vote_last_response_iem(
         names = (gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
                  gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
-        names = (gr.Markdown(
+        names = (gr.Markdown(state0.model_name, visible=False), gr.Markdown(state1.model_name, visible=False))
     return names + ("", "", gr.Image(height=512, width=512, type="pil"), "") + (disable_btn,) * 4
 
 
@@ -342,8 +345,8 @@ def leftvote_last_response_vgm(
         return ("",) + (disable_btn,) * 4 + (gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True), gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
         return ("",) + (disable_btn,) * 4 + (
-            gr.Markdown(
-            gr.Markdown(
+            gr.Markdown(state0.model_name, visible=False),
+            gr.Markdown(state1.model_name, visible=False))
 
 
 def rightvote_last_response_vgm(
@@ -359,8 +362,8 @@ def rightvote_last_response_vgm(
             gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
         return ("",) + (disable_btn,) * 4 + (
-            gr.Markdown(
-            gr.Markdown(
+            gr.Markdown(state0.model_name, visible=False),
+            gr.Markdown(state1.model_name, visible=False))
 
 def tievote_last_response_vgm(
     state0, state1, model_selector0, model_selector1, request: gr.Request
@@ -375,8 +378,8 @@ def tievote_last_response_vgm(
             gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
         return ("",) + (disable_btn,) * 4 + (
-            gr.Markdown(
-            gr.Markdown(
+            gr.Markdown(state0.model_name, visible=False),
+            gr.Markdown(state1.model_name, visible=False))
 
 
 def bothbad_vote_last_response_vgm(
@@ -392,8 +395,8 @@ def bothbad_vote_last_response_vgm(
             gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True))
     else:
         return ("",) + (disable_btn,) * 4 + (
-            gr.Markdown(
-            gr.Markdown(
+            gr.Markdown(state0.model_name, visible=False),
+            gr.Markdown(state1.model_name, visible=False))
 
 share_js = """
 function (a, b, c, d) {
@@ -482,8 +485,6 @@ class VideoStateVG:
         return base
 
 
-
-
 def generate_ig(gen_func, state, text, model_name, request: gr.Request):
     if not text:
         raise gr.Warning("Prompt cannot be empty.")
@@ -599,6 +600,11 @@ def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1,
     start_tstamp = time.time()
     model_name0 = re.sub(r"### Model A: ", "", model_name0)
    model_name1 = re.sub(r"### Model B: ", "", model_name1)
+    model_map = {model_name.split('_')[1]: model_name for model_name in IMAGE_GENERATION_MODELS}
+    if model_name0 in model_map:
+        model_name0 = model_map[model_name0]
+    if model_name1 in model_map:
+        model_name1 = model_map[model_name1]
     generated_image0, generated_image1, model_name0, model_name1 = gen_func(text, model_name0, model_name1)
     state0.prompt = text
     state1.prompt = text
@@ -797,6 +803,11 @@ def generate_iem_annoy(gen_func, state0, state1, source_text, target_text, instr
     start_tstamp = time.time()
     model_name0 = re.sub(r"### Model A: ", "", model_name0)
     model_name1 = re.sub(r"### Model B: ", "", model_name1)
+    model_map = {model_name.split('_')[1]: model_name for model_name in IMAGE_EDITION_MODELS}
+    if model_name0 in model_map:
+        model_name0 = model_map[model_name0]
+    if model_name1 in model_map:
+        model_name1 = model_map[model_name1]
     generated_image0, generated_image1, model_name0, model_name1 = gen_func(source_text, target_text, instruct_text, source_image, model_name0, model_name1)
     state0.source_prompt = source_text
     state0.target_prompt = target_text
@@ -991,6 +1002,11 @@ def generate_vgm_annoy(gen_func, state0, state1, text, model_name0, model_name1,
     start_tstamp = time.time()
     model_name0 = re.sub(r"### Model A: ", "", model_name0)
     model_name1 = re.sub(r"### Model B: ", "", model_name1)
+    model_map = {model_name.split('_')[1]: model_name for model_name in VIDEO_GENERATION_MODELS}
+    if model_name0 in model_map:
+        model_name0 = model_map[model_name0]
+    if model_name1 in model_map:
+        model_name1 = model_map[model_name1]
     generated_video0, generated_video1, model_name0, model_name1 = gen_func(text, model_name0, model_name1)
     state0.prompt = text
     state1.prompt = text
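How the added lookup relates to the regenerate bug, read from the diff above: after re.sub strips the "### Model A: " prefix, the selector carries only the short display name (e.g. 'SDXL'), and the new model_map maps it back to the full key the generation backend expects. A minimal, self-contained sketch assuming the constants introduced in serve/constants.py (standalone example, using stdlib re instead of the repo's regex module):

import re  # the repo imports regex as re; stdlib re behaves the same for this sub()

IMAGE_GENERATION_MODELS = ['fal_SDXL_text2image', 'imagenhub_PixArtAlpha_generation']  # subset, for illustration

model_name0 = re.sub(r"### Model A: ", "", "### Model A: SDXL")             # -> 'SDXL'
model_map = {name.split('_')[1]: name for name in IMAGE_GENERATION_MODELS}  # 'SDXL' -> 'fal_SDXL_text2image'
if model_name0 in model_map:
    model_name0 = model_map[model_name0]
print(model_name0)  # fal_SDXL_text2image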