viddiff committed on
Commit
cad6eb5
1 Parent(s): 49ed58c

Create load_viddiff_dataset.py

Browse files
Files changed (1) hide show
  1. load_viddiff_dataset.py +307 -0
load_viddiff_dataset.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ipdb
2
+ import pdb
3
+ import os
4
+ import numpy as np
5
+ import json
6
+ import re
7
+ from PIL import Image
8
+ from pathlib import Path
9
+ from datasets import load_dataset
10
+ import decord
11
+ from tqdm import tqdm
12
+ import logging
13
+ import hashlib
14
+
15
+
16
def load_viddiff_dataset(splits=("easy",), subset_mode="0"):
    """
    Load the VidDiffBench test set, filtered to the requested splits.

    Args:
        splits: iterable of split names to keep; valid values are
            ['ballsports', 'demo', 'easy', 'fitness', 'music', 'surgery'].
            (Tuple default avoids the mutable-default-argument pitfall of
            the original `splits=["easy"]`.)
        subset_mode: forwarded to `apply_subset_mode`; e.g. "3_per_action"
            keeps only the first 3 rows per unique action, while any
            non-matching value (such as "0") keeps every row.

    Returns:
        The filtered `datasets` test split, with the JSON-string columns
        'videos', 'differences_annotated' and 'differences_gt' parsed
        into Python objects.
    """
    dataset = load_dataset("viddiff/VidDiffBench", cache_dir=None)
    dataset = dataset['test']

    def _filter_splits(example):
        return example["split"] in splits

    dataset = dataset.filter(_filter_splits)

    def _map_elements_to_json(example):
        # These columns are stored as JSON strings on the hub; decode them.
        example["videos"] = json.loads(example["videos"])
        example["differences_annotated"] = json.loads(
            example["differences_annotated"])
        example["differences_gt"] = json.loads(example["differences_gt"])
        return example

    dataset = dataset.map(_map_elements_to_json)
    # dataset = dataset.map(_clean_annotations)
    dataset = apply_subset_mode(dataset, subset_mode)

    return dataset
40
+
41
+
42
def load_all_videos(dataset, cache=True, do_tqdm=True):
    """
    Load both videos for every row of the dataset.

    Returns a 2-element tuple (videos_a, videos_b). Each element is a list
    of length len(dataset); entry i is a dict for video A (resp. B) of
    datapoint i, containing:
        path:  original path to the video
        fps:   frames per second
        video: numpy array of the video, shape (nframes, H, W, 3)

    Set `do_tqdm=False` to suppress the progress bar; `cache` is forwarded
    to `get_video_data`.
    """
    videos_a, videos_b = [], []

    # Wrap the dataset in a progress bar only when requested.
    iterator = tqdm(dataset) if do_tqdm else dataset

    for row in iterator:
        pair = get_video_data(row['videos'], cache=cache)
        videos_a.append(pair[0])
        videos_b.append(pair[1])

    return (videos_a, videos_b)
67
+
68
+
69
+ def _clean_annotations(example):
70
+ # Not all differences in the taxonomy may have a label available, so filter them.
71
+
72
+ differences_gt_labeled = {
73
+ k: v
74
+ for k, v in example['differences_gt'].items() if v is not None
75
+ }
76
+ differences_annotated = {
77
+ k: v
78
+ for k, v in example['differences_annotated'].items()
79
+ if k in differences_gt_labeled.keys()
80
+ }
81
+
82
+ # Directly assign to the example without deepcopy
83
+ example['differences_gt'] = differences_gt_labeled
84
+ example['differences_annotated'] = differences_annotated
85
+
86
+ return example
87
+
88
+
89
def get_video_data(videos: dict, cache=True):
    """
    Load the pair of videos referenced by a dataset row as numpy arrays.

    Pass in the videos dictionary from the dataset, like
    dataset[idx]['videos']: an indexable pair where videos[0] and videos[1]
    each carry 'path', 'fps', and 'frames_trim' (a (start, stop) pair).

    When `cache` is True, each trimmed video is persisted as a numpy memmap
    under cache/cache_data/, keyed by a hash of (path + frames_trim), so
    subsequent loads return a read-only memmap instead of re-decoding.

    Returns:
        A 2-element list of dicts; each is a copy of videos[i] with an
        added 'video' entry (numpy array or memmap, shape (nframes,H,W,3)).
    """
    video_dicts = []

    for i in [0, 1]:
        path = videos[i]['path']
        assert Path(path).exists(
        ), f"Video not downloaded [{path}]\ncheck dataset README about downloading videos"
        # frames_trim is stored as (start, stop); turn it into a slice.
        frames_trim = slice(*videos[i]['frames_trim'])

        # Copy so the cached/loaded array doesn't mutate the dataset row.
        video_dict = videos[i].copy()

        if cache:
            dir_cache = Path("cache/cache_data")
            dir_cache.mkdir(exist_ok=True, parents=True)
            # Cache key covers both the file and the trim range.
            hash_key = get_hash_key(path + str(frames_trim))
            memmap_filename = dir_cache / f"memmap_{hash_key}.npy"

            # Cache hit: reconstruct the memmap from the saved shape/dtype
            # sidecar file and skip decoding entirely.
            if os.path.exists(memmap_filename):
                video_info = np.load(f"{memmap_filename}.info.npy",
                                     allow_pickle=True).item()
                video = np.memmap(memmap_filename,
                                  dtype=video_info['dtype'],
                                  mode='r',
                                  shape=video_info['shape'])
                video_dict['video'] = video
                video_dicts.append(video_dict)
                continue

        # Cache miss (or caching disabled): decode from disk. A directory
        # is treated as a folder of image frames; otherwise expect a video
        # file.
        is_dir = Path(path).is_dir()
        if is_dir:
            video = _load_video_from_directory_of_images(
                path, frames_trim=frames_trim)

        else:
            assert Path(path).suffix in (".mp4", ".mov")
            video, fps = _load_video(path, frames_trim=frames_trim)
            # Sanity-check the decoded fps against the dataset metadata.
            assert fps == videos[i]['fps']

        if cache:
            # Persist shape/dtype sidecar, then write the frames into a
            # fresh memmap and hand that back instead of the in-RAM array.
            np.save(f"{memmap_filename}.info.npy", {
                'shape': video.shape,
                'dtype': video.dtype
            })
            memmap = np.memmap(memmap_filename,
                               dtype=video.dtype,
                               mode='w+',
                               shape=video.shape)
            memmap[:] = video[:]
            memmap.flush()
            video = memmap

        video_dict['video'] = video
        video_dicts.append(video_dict)

    return video_dicts
152
+
153
+
154
def _load_video(f, return_fps=True, frames_trim: slice = None) -> np.ndarray:
    """
    Decode an mp4/mov file into a numpy array of frames, shape (N, H, W, 3).

    Not intended for long videos: raises ValueError above 50000 frames.
    `frames_trim` is a slice selecting which frames to keep; None keeps all.
    When `return_fps` is True (the default), returns (frames, fps);
    otherwise returns just the frames array.
    """
    reader = decord.VideoReader(str(f))
    fps = reader.get_avg_fps()

    if len(reader) > 50000:
        raise ValueError(
            "Video probably has too many frames to convert to a numpy")

    trim = slice(0, None, None) if frames_trim is None else frames_trim
    frames = reader[trim].asnumpy()

    if return_fps:
        assert fps > 0
        return frames, fps
    return frames
177
+
178
+
179
def _load_video_from_directory_of_images(
    path_dir: str,
    frames_trim: slice = None,
    downsample_time: int = None,
) -> np.ndarray:
    """
    Build a video array from a directory of image frames.

    The files in `path_dir`, sorted alphabetically, are treated as
    consecutive frames of one video. Optionally trim with `frames_trim`
    (a slice over the sorted file list) and/or keep every
    `downsample_time`-th file. Returns a numpy array (N, H, W, 3) where N
    is the number of frames kept.
    """
    filenames = sorted(os.listdir(path_dir))

    if frames_trim is not None:
        filenames = filenames[frames_trim]

    if downsample_time is not None:
        filenames = filenames[::downsample_time]

    frames = [Image.open(f"{path_dir}/{f}") for f in filenames]
    return np.stack(frames)
205
+
206
+
207
+ def _subsample_video(video: np.ndarray,
208
+ fps_original: int,
209
+ fps_target: int,
210
+ fps_warning: bool = True):
211
+ """
212
+ video: video as numby array (nframes, h, w, 3)
213
+ fps_original: original fps of the video
214
+ fps_target: target fps to downscale to
215
+ fps_warning: if True, then log warnings to logger if the target fps is
216
+ higher than original fps, or if the target fps isn't possible because
217
+ it isn't divisible by the original fps.
218
+ """
219
+ subsample_time = fps_original / fps_target
220
+
221
+ if subsample_time < 1 and fps_warning:
222
+ logging.warning(f"Trying to subsample frames to fps {fps_target}, which "\
223
+ "is higher than the fps of the original video which is "\
224
+ "{video['fps']}. The video fps won't be changed for {video['path']}. "\
225
+ f"\nSupress this warning by setting config fps_warning=False")
226
+ return video, fps_original, 1
227
+
228
+ subsample_time_int = int(subsample_time)
229
+ fps_new = int(fps_original / subsample_time_int)
230
+ if fps_new != fps_target and fps_warning:
231
+ logging.warning(f"Config lmm.fps='{fps_target}' but the original fps is {fps_original} " \
232
+ f"so we downscale to fps {fps_new} instead. " \
233
+ f"\nSupress this warning by setting config fps_warning=False")
234
+
235
+ video = video[::subsample_time_int]
236
+
237
+ return video, fps_new, subsample_time_int
238
+
239
+
240
def apply_subset_mode(dataset, subset_mode):
    """
    Optionally restrict the dataset to the first k rows per unique action.

    If `subset_mode` matches "<k>_per_action" (e.g. "3_per_action"), keep
    only the first k rows for each distinct value of row['action'] and
    return dataset.select(...) over those indices. Any other value (e.g.
    "0") returns the dataset untouched. Useful for working with subsets.
    """
    m = re.match(r"(\d+)_per_action", subset_mode)
    if m is None:
        return dataset

    limit = int(m.group(1))
    counts = {}
    keep = []

    for idx, row in enumerate(dataset):
        action = row['action']
        seen = counts.get(action, 0)
        if seen < limit:
            keep.append(idx)
            seen += 1
        counts[action] = seen

    return dataset.select(keep)
264
+
265
+
266
def get_hash_key(key: str) -> str:
    """Return the hex-encoded SHA-256 digest of `key` (used as a cache filename key)."""
    digest = hashlib.sha256(key.encode())
    return digest.hexdigest()
268
+
269
+
270
+ def get_n_differences(dataset, config_n_differences: int | str | Path):
271
+ """
272
+ The maximum number of differences the model is allowed to make.
273
+ Either it's a single int, or its a path to a json `ndiff`, where n_differences
274
+ is indexed by the data split and sample action, e.g.:
275
+ ndiff['fitness']['fitness_4'] = 8
276
+ For split 'fitness' and action 'fitness_4'
277
+
278
+ Returns: a list with length len(dataset), with an int for each sample.
279
+ """
280
+ if type(config_n_differences) is int:
281
+ n_differences = [config_n_differences] * len(dataset)
282
+ else:
283
+ path = Path(config_n_differences)
284
+ if not path.exists():
285
+ raise ValueError(
286
+ f"Config value n_differences: [{n_differences}] must be an int " \
287
+ "or a path to a json with per-action level stuff n_differences ")
288
+ with open(path, 'r') as fp:
289
+ lookup_ndiff = json.load(fp)
290
+ n_differences = []
291
+ for row in dataset:
292
+ split = row['split']
293
+ action = row['action']
294
+ if split not in lookup_ndiff.keys(
295
+ ) or action not in lookup_ndiff[split].keys():
296
+ raise ValueError(
297
+ f"n_differences json at {path} has no entry for {(action, split)}"
298
+ )
299
+ n_differences.append(lookup_ndiff[split][action])
300
+
301
+ return n_differences
302
+
303
+
304
if __name__ == "__main__":
    # Smoke test: load the small 'demo' split and decode both videos for
    # every row (populates the memmap cache on first run).
    # dataset = load_viddiff_dataset(splits=['surgery','ballsports'])
    dataset = load_viddiff_dataset(splits=['demo'])
    videos = load_all_videos(dataset)