Vicky0522 committed
Commit ee2ed6e
1 Parent(s): a6249d0

Upload item7/config_single_chunk.yaml with huggingface_hub
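The commit message indicates the file was pushed with the huggingface_hub client. Below is a minimal, hypothetical sketch of such an upload call: only the file path and commit message come from this page, while the repo_id is a placeholder assumption.

# Hypothetical reconstruction of the upload; repo_id is a placeholder assumption.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="item7/config_single_chunk.yaml",  # local YAML file to push
    path_in_repo="item7/config_single_chunk.yaml",     # destination path in the repo
    repo_id="<namespace>/<repo>",                      # placeholder, not given on this page
    commit_message="Upload item7/config_single_chunk.yaml with huggingface_hub",
)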

Files changed (1)
  1. item7/config_single_chunk.yaml +108 -0
item7/config_single_chunk.yaml ADDED
@@ -0,0 +1,108 @@
+ # Pretrained diffusers model path.
+ pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
+ # The folder where your training outputs will be placed.
+ output_dir: "./outputs"
+ seed: 23
+ num_steps: 25
+ # Xformers must be installed for best memory savings and performance (< Pytorch 2.0)
+ enable_xformers_memory_efficient_attention: True
+ # Use scaled dot product attention (Only available with >= Torch 2.0)
+ enable_torch_2_attn: true
+
+ use_sarp: true
+
+ use_motion_lora: true
+ train_motion_lora_only: false
+ retrain_motion_lora: false
+
+ use_inversed_latents: true
+ use_attention_matching: true
+ use_consistency_attention_control: false
+ dtype: fp32
+
+ save_last_frames: true
+
+ # data_params
+ data_params:
+   video_path: "../datasets/svdedit/item7/production_id_4811927.mp4"
+   keyframe_paths:
+     - "../datasets/svdedit/item7/golden_dress.png"
+     - "../datasets/svdedit/item7/ice.png"
+   start_t: 0
+   end_t: 1
+   sample_fps: 14
+   chunk_size: 14
+   overlay_size: 1
+   normalize: true
+   output_fps: 14
+   save_sampled_frame: true
+   output_res: [576, 1024]
+   pad_to_fit: false
+
+ train_motion_lora_params:
+   cache_latents: true
+   cached_latent_dir: null
+   lora_rank: 32
+   use_unet_lora: true
+   lora_unet_dropout: 0.1
+   save_pretrained_model: false
+   learning_rate: 0.0005
+   adam_weight_decay: 0.01
+   max_train_steps: 300
+   checkpointing_steps: 50
+   validation_steps: 50
+   mixed_precision: fp16
+   gradient_checkpointing: true
+   image_encoder_gradient_checkpointing: true
+   train_data:
+     width: 896
+     height: 512
+     use_data_aug: null
+     pad_to_fit: false
+   validation_data:
+     sample_preview: true
+     num_frames: 14
+     width: 1024
+     height: 576
+     pad_to_fit: false
+     spatial_scale: 0
+     noise_prior:
+       - 1.0
+
+ sarp_params:
+   sarp_noise_scale: 0.005
+
+ attention_matching_params:
+   best_checkpoint_index: 300
+   lora_scale: 1.0
+   lora_dir: "./cache/item7/train_motion_lora"
+   max_guidance_scale: 2.5
+   disk_store: true
+   load_attention_store: "./cache/item7/attention_store/"
+   registered_modules:
+     BasicTransformerBlock:
+       - "attn1"
+     TemporalBasicTransformerBlock:
+       - "attn1"
+   control_mode:
+     spatial_self: "masked_copy"
+     temporal_self: "copy_v2"
+   cross_replace_steps: 0.0
+   temporal_self_replace_steps: 1.0
+   spatial_self_replace_steps: 1.0
+   spatial_attention_chunk_size: 1
+
+   params:
+     edit0:
+       temporal_step_thr: [0.5, 0.8]
+       mask_thr: [0.35, 0.35]
+     edit1:
+       temporal_step_thr: [0.5, 0.8]
+       mask_thr: [0.35, 0.35]
+
+ long_video_params:
+   mode: skip-interval
+   registered_modules:
+     BasicTransformerBlock: null
+     TemporalBasicTransformerBlock:
+       - attn1
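For reference, a minimal sketch of how a nested YAML config like this one might be read on the consumer side. The use of OmegaConf and the fields accessed below are illustrative assumptions; the repository's actual loading code may differ.

# Illustrative only: OmegaConf is an assumption; yaml.safe_load would work similarly.
from omegaconf import OmegaConf

cfg = OmegaConf.load("item7/config_single_chunk.yaml")
print(cfg.pretrained_model_path)               # "ckpts/stable-video-diffusion-img2vid"
print(cfg.data_params.chunk_size)              # 14
print(cfg.train_motion_lora_params.lora_rank)  # 32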
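Separately, the data_params block samples the clip at 14 fps and processes it in chunks of chunk_size frames that overlap by overlay_size frames. A hypothetical sketch of how such chunk indices are commonly derived is below; the exact semantics in the code that consumes this config may differ.

# Hypothetical chunking helper: adjacent chunks share `overlay_size` frames.
def chunk_indices(num_frames: int, chunk_size: int = 14, overlay_size: int = 1):
    step = chunk_size - overlay_size
    start = 0
    while start < num_frames:
        yield list(range(start, min(start + chunk_size, num_frames)))
        if start + chunk_size >= num_frames:
            break
        start += step

# With 14 sampled frames, this "single chunk" config yields exactly one chunk: frames 0-13.
print(list(chunk_indices(14)))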