# Data: NeRF synthetic Blender scenes; each ray batch is drawn from a single
# training image, with no image downsampling (factor: 0 keeps full resolution).
dataset: blender
batching: single_image
factor: 0
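
# Rendering: hierarchical sampling with 64 coarse and 128 fine samples per ray;
# the MLP is conditioned on view direction, and renders are composited onto a
# white background (the usual choice for the Blender scenes).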
num_coarse_samples: 64
num_fine_samples: 128
use_viewdirs: true
white_bkgd: true
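
# Optimization: 1024 rays per gradient step with randomized (stratified)
# sampling along each ray, for 200k steps in total; the semantic consistency
# (SC) loss is switched off after step 160k.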
batch_size: 1024
randomized: true
max_steps: 200000
stop_sc_loss: 160000
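
# Logging cadence, in training steps: metrics, validation renders, checkpoints.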
print_every: 100
render_every: 1000
save_every: 5000
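
# Semantic consistency (SC) loss, computed in the embedding space of the
# Hugging Face ViT-B/32 CLIP checkpoint, with embeddings cast to float16.
# With sc_loss_every: 16 and stop_sc_loss: 160000, the loss is applied on
# 10,000 of the 200,000 training steps.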
use_semantic_loss: true
clip_model_name: openai/clip-vit-base-patch32
clip_output_dtype: float16
sc_loss_every: 16
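
# Few-shot protocol: optimize from only 8 of the scene's training views.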
few_shot: 8
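
# A minimal loading sketch (an assumption, not part of the original file: it
# presumes PyYAML is installed and that this config is saved under the
# hypothetical path configs/blender_few_shot.yaml):
#
#   import yaml
#   with open("configs/blender_few_shot.yaml") as f:
#       config = yaml.safe_load(f)          # plain dict of the keys above
#   steps_with_sc_loss = config["stop_sc_loss"] // config["sc_loss_every"]
#   print(steps_with_sc_loss)               # 10000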