dataset: blender                               # NeRF synthetic (Blender) scenes
batching: single_image                         # draw each ray batch from a single training image
factor: 0                                      # image downsampling factor (0 = full resolution)
num_coarse_samples: 64                         # samples per ray, coarse network
num_fine_samples: 64                           # additional samples per ray for the fine network
use_viewdirs: true                             # condition color on viewing direction
white_bkgd: true                               # composite renders onto a white background
batch_size: 1024                               # rays per gradient step
randomized: true                               # stratified (jittered) sampling along rays
max_steps: 200000                              # total optimization steps
print_every: 100                               # log metrics every N steps
render_every: 1000                             # render a validation view every N steps
save_every: 5000                               # write a checkpoint every N steps
use_semantic_loss: true                        # enable the CLIP-based semantic consistency loss
clip_model_name: openai/clip-vit-base-patch32  # CLIP backbone used for the semantic loss
clip_output_dtype: float16                     # dtype of CLIP image embeddings
sc_loss_every: 16                              # apply the semantic loss every N steps
few_shot: -1                                   # number of training views (-1 = use all)
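
# For reference, a minimal sketch of how the semantic-loss keys above
# (use_semantic_loss, clip_model_name, clip_output_dtype, sc_loss_every)
# might be consumed. It assumes the HuggingFace `transformers` Flax port of
# CLIP; `semantic_loss` and the step-gating variables are illustrative names,
# not this repo's API. Kept as comments so this file remains valid YAML.
#
#   import jax
#   import jax.numpy as jnp
#   from transformers import FlaxCLIPModel
#
#   # Frozen CLIP image encoder; dtype mirrors clip_output_dtype above.
#   clip = FlaxCLIPModel.from_pretrained(
#       "openai/clip-vit-base-patch32", dtype=jnp.float16)
#
#   def semantic_loss(rendered, reference):
#       """Cosine distance between CLIP embeddings of rendered views and
#       held-out ground-truth views. Inputs: (N, 3, 224, 224) pixel arrays,
#       already resized and normalized for CLIP."""
#       ren = clip.get_image_features(pixel_values=rendered)
#       # No gradient through the reference branch.
#       ref = jax.lax.stop_gradient(
#           clip.get_image_features(pixel_values=reference))
#       ren = ren / jnp.linalg.norm(ren, axis=-1, keepdims=True)
#       ref = ref / jnp.linalg.norm(ref, axis=-1, keepdims=True)
#       return 1.0 - jnp.mean(jnp.sum(ren * ref, axis=-1))
#
#   # Gated by the schedule above, e.g.:
#   # if use_semantic_loss and step % sc_loss_every == 0:
#   #     loss = loss + semantic_loss(rendered_views, reference_views)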