epishchik committed on
Commit
9356f36
1 Parent(s): 351e037

Combine subdatasets into a global dataset

Browse files
Files changed (1) hide show
  1. super-resolution-games.py +141 -20
super-resolution-games.py CHANGED
@@ -1,4 +1,5 @@
1
  import datasets
 
2
 
3
 
4
  _DESCRIPTION = ''
@@ -20,13 +21,15 @@ _PROJECTS_GAME_ENGINE = [
20
  'GameEngine_SlayAnimationSample',
21
  'GameEngine_StylizedRendering',
22
  'GameEngine_SubwaySequencer',
23
- 'GameEngine_SunTemple'
 
24
  ]
25
 
26
  _PROJECTS_DOWNSCALE = [
27
  'Downscale_DMXPrevisSample',
28
  'Downscale_Dota2',
29
- 'Downscale_CitySample'
 
30
  ]
31
 
32
  _PROJECTS = _PROJECTS_GAME_ENGINE + _PROJECTS_DOWNSCALE
@@ -48,13 +51,15 @@ _DESCRIPTION_DATA_GAME_ENGINE = {
48
  'GameEngine_SlayAnimationSample': '',
49
  'GameEngine_StylizedRendering': '',
50
  'GameEngine_SubwaySequencer': '',
51
- 'GameEngine_SunTemple': ''
 
52
  }
53
 
54
  _DESCRIPTION_DATA_DOWNSCALE = {
55
  'Downscale_DMXPrevisSample': '',
56
  'Downscale_Dota2': '',
57
- 'Downscale_CitySample': ''
 
58
  }
59
 
60
  _DESCRIPTION_DATA = {
@@ -314,6 +319,46 @@ _DATA_FILES_GAME_ENGINE = {
314
  }
315
  }
316
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
317
  _DATA_FILES_DOWNSCALE = {
318
  'Downscale_DMXPrevisSample': {
319
  'train': {
@@ -359,6 +404,46 @@ _DATA_FILES_DOWNSCALE = {
359
  }
360
  }
361
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
362
  _DATA_FILES = {
363
  **_DATA_FILES_GAME_ENGINE,
364
  **_DATA_FILES_DOWNSCALE
@@ -394,30 +479,66 @@ class SuperResolutionGames(datasets.GeneratorBasedBuilder):
394
  data_files = self.config.data_files
395
  train_archives, val_archives = data_files['train'], data_files['val']
396
 
397
- train_archives_downloaded = {
398
- k: dl_manager.download(train_archives[k]) \
399
- for k in train_archives.keys()
400
- }
 
 
 
 
 
 
 
 
 
401
 
402
- val_archives_downloaded = {
403
- k: dl_manager.download(val_archives[k]) \
404
- for k in val_archives.keys()
405
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
406
 
407
  splits = [
408
  datasets.SplitGenerator(
409
  name=datasets.Split.TRAIN,
410
- gen_kwargs={
411
- k: dl_manager.iter_archive(train_archives_downloaded[k]) \
412
- for k in train_archives_downloaded.keys()
413
- }
414
  ),
415
  datasets.SplitGenerator(
416
  name=datasets.Split.VALIDATION,
417
- gen_kwargs={
418
- k: dl_manager.iter_archive(val_archives_downloaded[k]) \
419
- for k in val_archives_downloaded.keys()
420
- }
421
  )
422
  ]
423
  return splits
 
1
  import datasets
2
+ from itertools import chain
3
 
4
 
5
  _DESCRIPTION = ''
 
21
  'GameEngine_SlayAnimationSample',
22
  'GameEngine_StylizedRendering',
23
  'GameEngine_SubwaySequencer',
24
+ 'GameEngine_SunTemple',
25
+ 'GameEngine_All'
26
  ]
27
 
28
  _PROJECTS_DOWNSCALE = [
29
  'Downscale_DMXPrevisSample',
30
  'Downscale_Dota2',
31
+ 'Downscale_CitySample',
32
+ 'Downscale_All'
33
  ]
34
 
35
  _PROJECTS = _PROJECTS_GAME_ENGINE + _PROJECTS_DOWNSCALE
 
51
  'GameEngine_SlayAnimationSample': '',
52
  'GameEngine_StylizedRendering': '',
53
  'GameEngine_SubwaySequencer': '',
54
+ 'GameEngine_SunTemple': '',
55
+ 'GameEngine_All': ''
56
  }
57
 
58
  _DESCRIPTION_DATA_DOWNSCALE = {
59
  'Downscale_DMXPrevisSample': '',
60
  'Downscale_Dota2': '',
61
+ 'Downscale_CitySample': '',
62
+ 'Downscale_All': ''
63
  }
64
 
65
  _DESCRIPTION_DATA = {
 
319
  }
320
  }
321
 
322
+ _DATA_FILES_GAME_ENGINE_KEYS = list(_DATA_FILES_GAME_ENGINE.keys())
323
+ _DATA_FILES_GAME_ENGINE['GameEngine_All'] = {
324
+ 'train': {
325
+ 'r270p': sorted([
326
+ _DATA_FILES_GAME_ENGINE[k]['train']['r270p'] \
327
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
328
+ ]),
329
+ 'r360p': sorted([
330
+ _DATA_FILES_GAME_ENGINE[k]['train']['r360p'] \
331
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
332
+ ]),
333
+ 'r540p': sorted([
334
+ _DATA_FILES_GAME_ENGINE[k]['train']['r540p'] \
335
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
336
+ ]),
337
+ 'r1080p': sorted([
338
+ _DATA_FILES_GAME_ENGINE[k]['train']['r1080p'] \
339
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
340
+ ])
341
+ },
342
+ 'val': {
343
+ 'r270p': sorted([
344
+ _DATA_FILES_GAME_ENGINE[k]['val']['r270p'] \
345
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
346
+ ]),
347
+ 'r360p': sorted([
348
+ _DATA_FILES_GAME_ENGINE[k]['val']['r360p'] \
349
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
350
+ ]),
351
+ 'r540p': sorted([
352
+ _DATA_FILES_GAME_ENGINE[k]['val']['r540p'] \
353
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
354
+ ]),
355
+ 'r1080p': sorted([
356
+ _DATA_FILES_GAME_ENGINE[k]['val']['r1080p'] \
357
+ for k in _DATA_FILES_GAME_ENGINE_KEYS
358
+ ])
359
+ }
360
+ }
361
+
362
  _DATA_FILES_DOWNSCALE = {
363
  'Downscale_DMXPrevisSample': {
364
  'train': {
 
404
  }
405
  }
406
 
407
+ _DATA_FILES_DOWNSCALE_KEYS = list(_DATA_FILES_DOWNSCALE.keys())
408
+ _DATA_FILES_DOWNSCALE['Downscale_All'] = {
409
+ 'train': {
410
+ 'r270p': sorted([
411
+ _DATA_FILES_DOWNSCALE[k]['train']['r270p'] \
412
+ for k in _DATA_FILES_DOWNSCALE_KEYS
413
+ ]),
414
+ 'r360p': sorted([
415
+ _DATA_FILES_DOWNSCALE[k]['train']['r360p'] \
416
+ for k in _DATA_FILES_DOWNSCALE_KEYS
417
+ ]),
418
+ 'r540p': sorted([
419
+ _DATA_FILES_DOWNSCALE[k]['train']['r540p'] \
420
+ for k in _DATA_FILES_DOWNSCALE_KEYS
421
+ ]),
422
+ 'r1080p': sorted([
423
+ _DATA_FILES_DOWNSCALE[k]['train']['r1080p'] \
424
+ for k in _DATA_FILES_DOWNSCALE_KEYS
425
+ ])
426
+ },
427
+ 'val': {
428
+ 'r270p': sorted([
429
+ _DATA_FILES_DOWNSCALE[k]['val']['r270p'] \
430
+ for k in _DATA_FILES_DOWNSCALE_KEYS
431
+ ]),
432
+ 'r360p': sorted([
433
+ _DATA_FILES_DOWNSCALE[k]['val']['r360p'] \
434
+ for k in _DATA_FILES_DOWNSCALE_KEYS
435
+ ]),
436
+ 'r540p': sorted([
437
+ _DATA_FILES_DOWNSCALE[k]['val']['r540p'] \
438
+ for k in _DATA_FILES_DOWNSCALE_KEYS
439
+ ]),
440
+ 'r1080p': sorted([
441
+ _DATA_FILES_DOWNSCALE[k]['val']['r1080p'] \
442
+ for k in _DATA_FILES_DOWNSCALE_KEYS
443
+ ])
444
+ }
445
+ }
446
+
447
  _DATA_FILES = {
448
  **_DATA_FILES_GAME_ENGINE,
449
  **_DATA_FILES_DOWNSCALE
 
479
  data_files = self.config.data_files
480
  train_archives, val_archives = data_files['train'], data_files['val']
481
 
482
+ train_archives_downloaded = {}
483
+ for k in train_archives.keys():
484
+ train_archives_k = train_archives[k]
485
+ if isinstance(train_archives_k, str):
486
+ train_archives_downloaded[k] = \
487
+ dl_manager.download(train_archives_k)
488
+ elif isinstance(train_archives_k, list):
489
+ train_archives_downloaded[k] = [
490
+ dl_manager.download(train_archives_k[i]) \
491
+ for i in range(len(train_archives_k))
492
+ ]
493
+ else:
494
+ raise TypeError()
495
 
496
+ val_archives_downloaded = {}
497
+ for k in val_archives.keys():
498
+ val_archives_k = val_archives[k]
499
+ if isinstance(val_archives_k, str):
500
+ val_archives_downloaded[k] = \
501
+ dl_manager.download(val_archives_k)
502
+ elif isinstance(val_archives_k, list):
503
+ val_archives_downloaded[k] = [
504
+ dl_manager.download(val_archives_k[i]) \
505
+ for i in range(len(val_archives_k))
506
+ ]
507
+ else:
508
+ raise TypeError()
509
+
510
+ train_gen_kwargs = {}
511
+ for k in train_archives_downloaded.keys():
512
+ train_archives_downloaded_k = train_archives_downloaded[k]
513
+ if isinstance(train_archives_downloaded_k, list):
514
+ train_gen_kwargs[k] = chain.from_iterable([
515
+ dl_manager.iter_archive(train_archives_downloaded_k[i]) \
516
+ for i in range(len(train_archives_downloaded_k))
517
+ ])
518
+ else:
519
+ train_gen_kwargs[k] = \
520
+ dl_manager.iter_archive(train_archives_downloaded_k)
521
+
522
+ val_gen_kwargs = {}
523
+ for k in val_archives_downloaded.keys():
524
+ val_archives_downloaded_k = val_archives_downloaded[k]
525
+ if isinstance(val_archives_downloaded_k, list):
526
+ val_gen_kwargs[k] = chain.from_iterable([
527
+ dl_manager.iter_archive(val_archives_downloaded_k[i]) \
528
+ for i in range(len(val_archives_downloaded_k))
529
+ ])
530
+ else:
531
+ val_gen_kwargs[k] = \
532
+ dl_manager.iter_archive(val_archives_downloaded_k)
533
 
534
  splits = [
535
  datasets.SplitGenerator(
536
  name=datasets.Split.TRAIN,
537
+ gen_kwargs=train_gen_kwargs
 
 
 
538
  ),
539
  datasets.SplitGenerator(
540
  name=datasets.Split.VALIDATION,
541
+ gen_kwargs=val_gen_kwargs
 
 
 
542
  )
543
  ]
544
  return splits