Vicky0522 committed
Commit 41e532d
1 Parent(s): 292c36c

Upload item9/config_single_chunk.yaml with huggingface_hub

Files changed (1)
  1. item9/config_single_chunk.yaml +142 -0
item9/config_single_chunk.yaml ADDED
@@ -0,0 +1,142 @@
+ # Pretrained diffusers model path.
+ pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
+ # The folder where your training outputs will be placed.
+ output_dir: "./outputs"
+ seed: 23
+ num_steps: 25
+ # Xformers must be installed for best memory savings and performance (PyTorch < 2.0).
+ enable_xformers_memory_efficient_attention: True
+ # Use scaled dot product attention (only available with Torch >= 2.0).
+ enable_torch_2_attn: True
+
+ use_sarp: true
+
+ use_motion_lora: true
+ train_motion_lora_only: false
+ retrain_motion_lora: false
+
+ use_inversed_latents: true
+ use_attention_matching: true
+ use_consistency_attention_control: false
+ dtype: fp16
+
+ save_last_frames: True
+ load_from_last_frames_latents:
+
+ # data_params
+ data_params:
+   video_path: "../datasets/svdedit/item9/source.mp4"
+   keyframe_paths:
+     - "../datasets/svdedit/item9/panda.png"
+     - "../datasets/svdedit/item9/tiger.png"
+   start_t: 0
+   end_t: -1
+   sample_fps: 4
+   chunk_size: 14
+   overlay_size: 1
+   normalize: true
+   output_fps: 4
+   save_sampled_frame: true
+   output_res: [576, 1024]
+   pad_to_fit: false
+   begin_clip_id: 0
+   end_clip_id: 1
+
+ train_motion_lora_params:
+   cache_latents: true
+   cached_latent_dir: null # /path/to/cached_latents
+   lora_rank: 32
+   # Use LoRA for the UNet model.
+   use_unet_lora: True
+   # LoRA dropout: the probability of randomly zeroing out elements. Helps prevent overfitting.
+   # See: https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html
+   lora_unet_dropout: 0.1
+   # The only time you want this off is if you're doing full LoRA training.
+   save_pretrained_model: False
+   # Learning rate for AdamW.
+   learning_rate: 5e-4
+   # Weight decay. Higher = more regularization; lower = closer to the dataset.
+   adam_weight_decay: 1e-2
+   # Maximum number of train steps. The model is saved after training.
+   max_train_steps: 300
+   # Saves a model every nth step.
+   checkpointing_steps: 50
+   # How many steps between validations if sample_preview is enabled.
+   validation_steps: 50
+   # Whether or not to use mixed precision with Accelerate.
+   mixed_precision: "fp16"
+   # Trades VRAM usage for speed. You lose roughly 20% of training speed but save a lot of VRAM.
+   # If you need to save more VRAM, it can also be enabled for the text encoder, but that roughly halves speed.
+   gradient_checkpointing: True
+   image_encoder_gradient_checkpointing: True
+
+   train_data:
+     # The width and height to which your training data will be resized.
+     width: 896
+     height: 512
+     # This will find the closest aspect ratio to your input width and height.
+     # For example, a 512x512 width and height with a video of resolution 1280x720 will be resized to 512x256.
+     use_data_aug: ~ # "rsfnet"
+     pad_to_fit: false
+
+   validation_data:
+     # Whether or not to sample a preview during training (requires more VRAM).
+     sample_preview: True
+     # The number of frames to sample during validation.
+     num_frames: 14
+     # Height and width of validation samples.
+     width: 1024
+     height: 576
+     pad_to_fit: false
+     # Scale of the spatial LoRAs; default is 0.
+     spatial_scale: 0
+     # Scale of the noise prior, i.e. the scale of the inversion noise.
+     noise_prior:
+       #- 0.0
+       - 1.0
+
+ sarp_params:
+   sarp_noise_scale: 0.005
+
+ attention_matching_params:
+   best_checkpoint_index: 250
+   lora_scale: 1.0
+   # LoRA path.
+   lora_dir: "./cache/item9/train_motion_lora"
+
+   disk_store: True
+   load_attention_store: "./cache/item9/attention_store/"
+   registered_modules:
+     BasicTransformerBlock:
+       - "attn1"
+       #- "attn2"
+     TemporalBasicTransformerBlock:
+       - "attn1"
+       #- "attn2"
+   control_mode:
+     spatial_self: "masked_copy"
+     temporal_self: "copy_v2"
+   cross_replace_steps: 0.0
+   temporal_self_replace_steps: 1.0
+   spatial_self_replace_steps: 1.0
+   spatial_attention_chunk_size: 1
+
+   params:
+     edit0:
+       temporal_step_thr: [0.6, 0.8]
+       mask_thr: [0.5, 0.5]
+     edit1:
+       temporal_step_thr: [0.4, 0.5]
+       mask_thr: [0.3, 0.3]
+
+ long_video_params:
+   mode: "skip-interval"
+   registered_modules:
+     BasicTransformerBlock:
+       #- "attn1"
+       #- "attn2"
+     TemporalBasicTransformerBlock:
+       - "attn1"
+       #- "attn2"
+
+
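For orientation, here is a minimal sketch of how a config file like this could be read with PyYAML. The load_config helper and the printed keys are chosen for illustration only; this commit does not define a loader, and the repository's actual entry point may parse the file differently (e.g. via OmegaConf).

import yaml

def load_config(path: str) -> dict:
    # Hypothetical helper: parse the YAML experiment config into a plain dict.
    with open(path, "r") as f:
        return yaml.safe_load(f)

cfg = load_config("item9/config_single_chunk.yaml")

# Top-level scalars come back as native Python types.
print(cfg["pretrained_model_path"])  # ckpts/stable-video-diffusion-img2vid
print(cfg["num_steps"])              # 25

# Nested sections such as data_params parse as nested dicts.
data = cfg["data_params"]
print(data["chunk_size"], data["overlay_size"])  # 14 1
print(data["output_res"])                        # [576, 1024]

One caveat: PyYAML's safe_load resolves scientific-notation literals without a decimal point, such as the 5e-4 used for learning_rate above, as strings rather than floats, so a real loader would need to cast such values explicitly.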