vinesmsuic committed
Commit b34109c
1 Parent(s): 07bbd4d

adding video random sampling function

model/fetch_museum_results/__init__.py CHANGED
@@ -1,5 +1,5 @@
  from .imagen_museum import TASK_DICT, DOMAIN
- from .imagen_museum import fetch_indexes
+ from .imagen_museum import fetch_indexes, fetch_indexes_no_csv
  import random

  ARENA_TO_IG_MUSEUM = {"LCM(v1.5/XL)":"LCM",
@@ -56,3 +56,36 @@ def draw_from_imagen_museum(task, model_name):
          return [[source_image_link, image_link], [input_caption, output_caption, instruction]]
      else:
          raise ValueError("Task not supported")
+
+ def draw2_from_videogen_museum(task, model_name1, model_name2):
+     domain = "https://github.com/ChromAIca/VideoGenMuseum/raw/main/Museum/"
+     baselink = domain + "VideoGenHub_Text-Guided_VG"
+
+     matched_results = fetch_indexes_no_csv(baselink)
+     r = random.Random()
+     uid, value = r.choice(list(matched_results.items()))
+     video_link_1 = baselink + "/" + model_name1 + "/" + uid
+     video_link_2 = baselink + "/" + model_name2 + "/" + uid
+
+     if task == "t2v":  # Video Gen
+         prompt = value['prompt_en']
+         return [[video_link_1, video_link_2], [prompt]]
+     else:
+         raise ValueError("Task not supported")
+
+ def draw_from_videogen_museum(task, model_name):
+     domain = "https://github.com/ChromAIca/VideoGenMuseum/raw/main/Museum/"
+     baselink = domain + "VideoGenHub_Text-Guided_VG"
+
+     matched_results = fetch_indexes_no_csv(baselink)
+     r = random.Random()
+     uid, value = r.choice(list(matched_results.items()))
+     model = model_name
+     video_link = baselink + "/" + model + "/" + uid
+     print(video_link)
+
+     if task == "t2v":  # Video Gen
+         prompt = value['prompt_en']
+         return [video_link, prompt]
+     else:
+         raise ValueError("Task not supported")
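Note: the two helpers added above can be exercised on their own roughly as follows. This is a minimal sketch, assuming the repository root is on PYTHONPATH; the model folder names used here are illustrative and not taken from this commit.

    from model.fetch_museum_results import (
        draw_from_videogen_museum,
        draw2_from_videogen_museum,
    )

    # Single-model draw: a random uid is picked from dataset_lookup.json and
    # turned into a raw-GitHub video URL plus its English prompt.
    video_link, prompt = draw_from_videogen_museum("t2v", "VideoCrafter2")

    # Paired draw: both links share the same uid, so one prompt applies to both videos.
    (link_a, link_b), (shared_prompt,) = draw2_from_videogen_museum("t2v", "VideoCrafter2", "LaVie")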
model/fetch_museum_results/imagen_museum/__init__.py CHANGED
@@ -116,6 +116,10 @@ def fetch_indexes(baselink):
      matched_results = fetch_data_and_match(baselink+"/dataset_lookup.csv", baselink+"/dataset_lookup.json")
      return matched_results

+ def fetch_indexes_no_csv(baselink):
+     matched_results = fetch_json_data(baselink+"/dataset_lookup.json")
+     return matched_results
+
  if __name__ == "__main__":
      domain = "https://chromaica.github.io/Museum/"
      baselink = domain + "ImagenHub_Text-Guided_IE"
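Note: fetch_indexes_no_csv skips the CSV merge done by fetch_indexes and returns the uid-to-metadata mapping straight from dataset_lookup.json (fetch_json_data is presumably a thin JSON fetch helper defined earlier in this module). A standalone approximation of the same idea, assuming a plain requests call:

    import requests

    def fetch_indexes_no_csv_sketch(baselink):
        # No CSV join: just the JSON lookup mapping each uid to its metadata
        # (e.g. the "prompt_en" field consumed by the videogen draw helpers).
        resp = requests.get(baselink + "/dataset_lookup.json", timeout=30)
        resp.raise_for_status()
        return resp.json()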
model/model_manager.py CHANGED
@@ -6,7 +6,7 @@ import io, base64, json
  import spaces
  from PIL import Image
  from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
- from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum
+ from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum, draw_from_videogen_museum, draw2_from_videogen_museum

  class ModelManager:
      def __init__(self):
@@ -161,7 +161,12 @@ class ModelManager:
          return result

      def generate_video_vg_museum(self, model_name):
-         raise NotImplementedError
+         model_name = model_name.split('_')[1]
+         result_list = draw_from_videogen_museum("t2v", model_name)
+         video_link = result_list[0]
+         prompt = result_list[1]
+
+         return video_link, prompt

      def generate_video_vg_parallel_anony(self, prompt, model_A, model_B):
          if model_A == "" and model_B == "":
@@ -181,7 +186,14 @@ class ModelManager:
          else:
              model_names = [model_A, model_B]

-         raise NotImplementedError
+         with concurrent.futures.ThreadPoolExecutor() as executor:
+             model_1 = model_names[0].split('_')[1]
+             model_2 = model_names[1].split('_')[1]
+             result_list = draw2_from_videogen_museum("t2v", model_1, model_2)
+             video_links = result_list[0]
+             prompt_list = result_list[1]
+
+         return video_links[0], video_links[1], model_names[0], model_names[1], prompt_list[0]

      def generate_video_vg_parallel(self, prompt, model_A, model_B):
          model_names = [model_A, model_B]
@@ -193,5 +205,10 @@ class ModelManager:

      def generate_video_vg_museum_parallel(self, model_A, model_B):
          model_names = [model_A, model_B]
-
-         raise NotImplementedError
+         with concurrent.futures.ThreadPoolExecutor() as executor:
+             model_1 = model_A.split('_')[1]
+             model_2 = model_B.split('_')[1]
+             result_list = draw2_from_videogen_museum("t2v", model_1, model_2)
+             video_links = result_list[0]
+             prompt_list = result_list[1]
+         return video_links[0], video_links[1], prompt_list[0]
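Note: executor is never referenced inside either with concurrent.futures.ThreadPoolExecutor() block; the museum draw is a single synchronous call, so the executor is effectively a no-op wrapper. A hedged simplification of the same logic as a standalone function (the helper name is hypothetical, and the split assumes arena model names look like "<source>_<ModelName>"):

    from model.fetch_museum_results import draw2_from_videogen_museum

    def museum_parallel_draw(model_A, model_B):
        # Same behaviour as generate_video_vg_museum_parallel, minus the unused executor.
        model_1 = model_A.split('_')[1]   # assumes a "<source>_<ModelName>" naming scheme
        model_2 = model_B.split('_')[1]
        video_links, prompt_list = draw2_from_videogen_museum("t2v", model_1, model_2)
        return video_links[0], video_links[1], prompt_list[0]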
serve/gradio_web_video_generation.py CHANGED
@@ -9,8 +9,11 @@ from .vote_utils import (
      bothbad_vote_last_response_vgm as bothbad_vote_last_response,
      # share_click_vgm as share_click,
      generate_vg,
+     generate_vg_museum,
      generate_vgm,
+     generate_vgm_museum,
      generate_vgm_annoy,
+     generate_vgm_annoy_museum,
      share_js
  )
  from functools import partial
@@ -38,6 +41,7 @@ Find out who is the 🥇conditional video generation models! More models are goi
      state0 = gr.State()
      state1 = gr.State()
      gen_func = partial(generate_vgm_annoy, models.generate_video_vg_parallel_anony)
+     gen_func_random = partial(generate_vgm_annoy_museum, models.generate_video_vg_museum_parallel_anony)

      gr.Markdown(notice_markdown, elem_id="notice_markdown")

@@ -79,6 +83,8 @@ Find out who is the 🥇conditional video generation models! More models are goi
          elem_id="input_box",
      )
      send_btn = gr.Button(value="Send", variant="primary", scale=0)
+     draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
+

      with gr.Row():
          clear_btn = gr.Button(value="🎲 New Round", interactive=False)
@@ -120,6 +126,17 @@ Find out who is the 🥇conditional video generation models! More models are goi
          outputs=btn_list
      )

+     draw_btn.click(
+         gen_func_random,
+         inputs=[state0, state1, model_selector_left, model_selector_right],
+         outputs=[state0, state1, chatbot_left, chatbot_right, textbox, model_selector_left, model_selector_right],
+         api_name="draw_btn_annony"
+     ).then(
+         enable_buttons_side_by_side,
+         inputs=None,
+         outputs=btn_list
+     )
+
      clear_btn.click(
          clear_history_side_by_side_anony,
          inputs=None,
@@ -190,6 +207,7 @@ def build_side_by_side_ui_named_vg(models):
      state0 = gr.State()
      state1 = gr.State()
      gen_func = partial(generate_vgm, models.generate_video_vg_parallel)
+     gen_func_random = partial(generate_vgm_museum, models.generate_video_vg_museum_parallel)
      gr.Markdown(notice_markdown, elem_id="notice_markdown")

      with gr.Group(elem_id="share-region-named"):
@@ -239,6 +257,8 @@ def build_side_by_side_ui_named_vg(models):
          elem_id="input_box"
      )
      send_btn = gr.Button(value="Send", variant="primary", scale=0)
+     draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
+

      with gr.Row():
          clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
@@ -289,6 +309,17 @@ def build_side_by_side_ui_named_vg(models):
          inputs=None,
          outputs=btn_list
      )
+     draw_btn.click(
+         gen_func_random,
+         inputs=[state0, state1, model_selector_left, model_selector_right],
+         outputs=[state0, state1, chatbot_left, chatbot_right, textbox],
+         api_name="draw_side_by_side"
+     ).then(
+         enable_buttons_side_by_side,
+         inputs=None,
+         outputs=btn_list
+     )
+
      regenerate_btn.click(
          gen_func,
          inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
@@ -358,6 +389,7 @@ def build_single_model_ui_vg(models, add_promotion_links=False):

      state = gr.State()
      gen_func = partial(generate_vg, models.generate_video_vg)
+     gen_func_random = partial(generate_vg_museum, models.generate_video_vg_museum)
      gr.Markdown(notice_markdown, elem_id="notice_markdown")

      model_list = models.model_vg_list
@@ -387,6 +419,8 @@ def build_single_model_ui_vg(models, add_promotion_links=False):
      )

      send_btn = gr.Button(value="Send", variant="primary", scale=0)
+     draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
+

      with gr.Row():
          chatbot = gr.Video(width=512, autoplay=True)
@@ -442,6 +476,18 @@ def build_single_model_ui_vg(models, add_promotion_links=False):
          outputs=btn_list
      )

+     draw_btn.click(
+         gen_func_random,
+         inputs=[state, model_selector],
+         outputs=[state, chatbot, textbox],
+         api_name="draw_btn_single",
+         show_progress="full"
+     ).success(
+         enable_buttons,
+         inputs=None,
+         outputs=btn_list
+     )
+
      upvote_btn.click(
          upvote_last_response,
          inputs=[state, model_selector],
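Note: the new draw_btn wiring reuses the click -> then/success chaining already used by send_btn in this file. In isolation, the pattern looks roughly like the sketch below, with placeholder callbacks standing in for gen_func_random and enable_buttons (the component set is simplified and is not the app's actual layout):

    import gradio as gr

    def sample_random(state):
        # Placeholder for gen_func_random: returns new state, a video URL, and the prompt.
        return state, "https://example.com/sample.mp4", "sampled prompt"

    def enable_vote_button():
        # Placeholder for enable_buttons: re-enables voting after a sample is drawn.
        return gr.update(interactive=True)

    with gr.Blocks() as demo:
        state = gr.State()
        video = gr.Video()
        textbox = gr.Textbox()
        vote_btn = gr.Button("Upvote", interactive=False)
        draw_btn = gr.Button("🎲 Random sample", variant="primary")

        # click() runs the sampler; the chained step re-enables the vote button,
        # mirroring the draw_btn.click(...).then(...)/.success(...) wiring above.
        draw_btn.click(
            sample_random, inputs=[state], outputs=[state, video, textbox]
        ).then(enable_vote_button, inputs=None, outputs=[vote_btn])

    # demo.launch()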
serve/vote_utils.py CHANGED
@@ -1249,6 +1249,46 @@ def generate_vg(gen_func, state, text, model_name, request: gr.Request):
      save_video_file_on_log_server(output_file)
      yield state, output_file

+ def generate_vg_museum(gen_func, state, model_name, request: gr.Request):
+     if state is None:
+         state = VideoStateVG(model_name)
+     ip = get_ip(request)
+     vg_logger.info(f"generate. ip: {ip}")
+     start_tstamp = time.time()
+     generated_video, text = gen_func(model_name)
+     state.prompt = text
+     state.output = generated_video
+     state.model_name = model_name
+
+     # yield state, generated_video
+
+     finish_tstamp = time.time()
+
+     with open(get_conv_log_filename(), "a") as fout:
+         data = {
+             "tstamp": round(finish_tstamp, 4),
+             "type": "chat",
+             "model": model_name,
+             "gen_params": {},
+             "start": round(start_tstamp, 4),
+             "finish": round(finish_tstamp, 4),
+             "state": state.dict(),
+             "ip": get_ip(request),
+         }
+         fout.write(json.dumps(data) + "\n")
+         append_json_item_on_log_server(data, get_conv_log_filename())
+
+     output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+     os.makedirs(os.path.dirname(output_file), exist_ok=True)
+
+     r = requests.get(state.output)
+     with open(output_file, 'wb') as outfile:
+         outfile.write(r.content)
+
+     save_video_file_on_log_server(output_file)
+     yield state, output_file, text
+
+
  def generate_vgm(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
      if not text:
          raise gr.Warning("Prompt cannot be empty.")
@@ -1326,6 +1366,72 @@ def generate_vgm(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
      save_video_file_on_log_server(output_file)
      yield state0, state1, f'{VIDEO_DIR}/generation/{state0.conv_id}.mp4', f'{VIDEO_DIR}/generation/{state1.conv_id}.mp4'

+ def generate_vgm_museum(gen_func, state0, state1, model_name0, model_name1, request: gr.Request):
+     if state0 is None:
+         state0 = VideoStateVG(model_name0)
+     if state1 is None:
+         state1 = VideoStateVG(model_name1)
+     ip = get_ip(request)
+     igm_logger.info(f"generate. ip: {ip}")
+     start_tstamp = time.time()
+     # Remove ### Model (A|B): from model name
+     model_name0 = re.sub(r"### Model A: ", "", model_name0)
+     model_name1 = re.sub(r"### Model B: ", "", model_name1)
+     generated_video0, generated_video1, text = gen_func(model_name0, model_name1)
+     state0.prompt = text
+     state1.prompt = text
+     state0.output = generated_video0
+     state1.output = generated_video1
+     state0.model_name = model_name0
+     state1.model_name = model_name1
+
+     # yield state0, state1, generated_video0, generated_video1
+     print("====== model name =========")
+     print(state0.model_name)
+     print(state1.model_name)
+
+     finish_tstamp = time.time()
+
+     with open(get_conv_log_filename(), "a") as fout:
+         data = {
+             "tstamp": round(finish_tstamp, 4),
+             "type": "chat",
+             "model": model_name0,
+             "gen_params": {},
+             "start": round(start_tstamp, 4),
+             "finish": round(finish_tstamp, 4),
+             "state": state0.dict(),
+             "ip": get_ip(request),
+         }
+         fout.write(json.dumps(data) + "\n")
+         append_json_item_on_log_server(data, get_conv_log_filename())
+         data = {
+             "tstamp": round(finish_tstamp, 4),
+             "type": "chat",
+             "model": model_name1,
+             "gen_params": {},
+             "start": round(start_tstamp, 4),
+             "finish": round(finish_tstamp, 4),
+             "state": state1.dict(),
+             "ip": get_ip(request),
+         }
+         fout.write(json.dumps(data) + "\n")
+         append_json_item_on_log_server(data, get_conv_log_filename())
+
+     for i, state in enumerate([state0, state1]):
+         output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+         os.makedirs(os.path.dirname(output_file), exist_ok=True)
+         print(state.model_name)
+
+         r = requests.get(state.output)
+         with open(output_file, 'wb') as outfile:
+             outfile.write(r.content)
+
+         save_video_file_on_log_server(output_file)
+     yield state0, state1, f'{VIDEO_DIR}/generation/{state0.conv_id}.mp4', f'{VIDEO_DIR}/generation/{state1.conv_id}.mp4', text
+

  def generate_vgm_annoy(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
      if not text:
@@ -1393,4 +1499,67 @@ def generate_vgm_annoy(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
      save_video_file_on_log_server(output_file)

      yield state0, state1, f'{VIDEO_DIR}/generation/{state0.conv_id}.mp4', f'{VIDEO_DIR}/generation/{state1.conv_id}.mp4', \
          gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False)
+
+ def generate_vgm_annoy_museum(gen_func, state0, state1, model_name0, model_name1, request: gr.Request):
+     if state0 is None:
+         state0 = VideoStateVG(model_name0)
+     if state1 is None:
+         state1 = VideoStateVG(model_name1)
+     ip = get_ip(request)
+     vgm_logger.info(f"generate. ip: {ip}")
+     start_tstamp = time.time()
+     model_name0 = ""
+     model_name1 = ""
+     generated_video0, generated_video1, model_name0, model_name1, text = gen_func(model_name0, model_name1)
+     state0.prompt = text
+     state1.prompt = text
+     state0.output = generated_video0
+     state1.output = generated_video1
+     state0.model_name = model_name0
+     state1.model_name = model_name1
+
+     # yield state0, state1, generated_video0, generated_video1, \
+     #     gr.Markdown(f"### Model A: {model_name0}"), gr.Markdown(f"### Model B: {model_name1}")
+
+     finish_tstamp = time.time()
+     # logger.info(f"===output===: {output}")
+
+     with open(get_conv_log_filename(), "a") as fout:
+         data = {
+             "tstamp": round(finish_tstamp, 4),
+             "type": "chat",
+             "model": model_name0,
+             "gen_params": {},
+             "start": round(start_tstamp, 4),
+             "finish": round(finish_tstamp, 4),
+             "state": state0.dict(),
+             "ip": get_ip(request),
+         }
+         fout.write(json.dumps(data) + "\n")
+         append_json_item_on_log_server(data, get_conv_log_filename())
+         data = {
+             "tstamp": round(finish_tstamp, 4),
+             "type": "chat",
+             "model": model_name1,
+             "gen_params": {},
+             "start": round(start_tstamp, 4),
+             "finish": round(finish_tstamp, 4),
+             "state": state1.dict(),
+             "ip": get_ip(request),
+         }
+         fout.write(json.dumps(data) + "\n")
+         append_json_item_on_log_server(data, get_conv_log_filename())
+
+     for i, state in enumerate([state0, state1]):
+         output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+         os.makedirs(os.path.dirname(output_file), exist_ok=True)
+
+         r = requests.get(state.output)
+         with open(output_file, 'wb') as outfile:
+             outfile.write(r.content)
+
+         save_video_file_on_log_server(output_file)
+
+     yield state0, state1, f'{VIDEO_DIR}/generation/{state0.conv_id}.mp4', f'{VIDEO_DIR}/generation/{state1.conv_id}.mp4', text, \
+         gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False)