import os

import numpy as np
from torch.utils.data import Dataset


class Robo360(Dataset):
    def __init__(self, datadir, downsample=4):
        self.root_dir = datadir
        self.downsample = downsample
        self.read_meta()

    def read_meta(self):
        poses_bounds = np.load(os.path.join(self.root_dir, 'poses_bounds.npy'))  # (N_images, 17)
        poses = poses_bounds[:, :15].reshape(-1, 3, 5)  # (N_images, 3, 5)
        self.near_fars = poses_bounds[:, -2:]  # (N_images, 2)

        # Step 1: rescale focal length according to training resolution.
        # The last column of each 3x5 pose matrix stores [H, W, focal].
        H, W, _ = poses[0, :, -1]
        self.focal = poses[:, -1, -1]  # per-image focal length, (N_images,)
        self.img_wh = np.array([int(W / self.downsample), int(H / self.downsample)])
        self.focal = self.focal * self.img_wh[0] / W

        # Step 2: correct poses.
        # Original poses have rotation in the form "down right back"; change to
        # "right up back". See https://github.com/bmild/nerf/issues/34
        self.poses = np.concatenate(
            [poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1
        )  # (N_images, 3, 4)

    def __len__(self):
        return len(self.poses)

    def __getitem__(self, idx):
        # Minimal completion: only camera metadata is loaded in this section,
        # so return the per-camera parameters parsed by read_meta().
        return {
            'pose': self.poses[idx],          # (3, 4) camera-to-world matrix
            'focal': self.focal[idx],         # rescaled focal length
            'near_far': self.near_fars[idx],  # (2,) scene depth bounds
        }
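

# Usage sketch (assumptions: the directory path below is hypothetical and must
# contain an LLFF-style 'poses_bounds.npy'). Shows how the dataset exposes
# per-camera metadata once read_meta() has run.
if __name__ == '__main__':
    dataset = Robo360('data/robo360_scene', downsample=4)  # hypothetical path
    print(f'{len(dataset)} cameras at resolution {dataset.img_wh}')
    sample = dataset[0]
    print('pose:\n', sample['pose'])
    print('focal:', sample['focal'], 'near/far:', sample['near_far'])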