Vicky0522 committed on
Commit 50cb30c
1 Parent(s): df64d93

Delete item1/config_multi_chunks.yaml

Files changed (1)
  1. item1/config_multi_chunks.yaml +0 -150
item1/config_multi_chunks.yaml DELETED
@@ -1,150 +0,0 @@
- # Pretrained diffusers model path.
- pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
- # The folder where your training outputs will be placed.
- output_dir: "./sig_ablation"
- seed: 23
- num_steps: 25
- # xformers must be installed for the best memory savings and performance (< PyTorch 2.0).
- enable_xformers_memory_efficient_attention: True
- # Use scaled dot-product attention (only available with >= Torch 2.0).
- enable_torch_2_attn: True
-
- use_sarp: true
-
- use_motion_lora: true
- train_motion_lora_only: false
- retrain_motion_lora: false
-
- use_inversed_latents: true
- use_attention_matching: true
- use_consistency_attention_control: true
- dtype: fp16
-
- visualize_attention_store: false
- visualize_attention_store_steps: #[0, 5, 10, 15, 20, 24]
-
- save_last_frames: True
- load_from_last_frames_latents:
-
- # data_params
- data_params:
-   video_path: "../datasets/svdedit/item1/girl.mp4"
-   keyframe_paths:
-     - "../datasets/svdedit/item1/edit1.png"
-     - "../datasets/svdedit/item1/edit2.png"
-     - "../datasets/svdedit/item1/edit3.png"
-   start_t: 0
-   end_t: 8.4
-   sample_fps: 2.5
-   chunk_size: 11
-   overlay_size: 1
-   normalize: true
-   output_fps: 5
-   save_sampled_frame: true
-   output_res: [576, 1024]
-   pad_to_fit: false
-   begin_clip_id: 0
-   end_clip_id: 2
-
- train_motion_lora_params:
-   cache_latents: true
-   cached_latent_dir: null #/path/to/cached_latents
-   lora_rank: 32
-   # Use LoRA for the UNet model.
-   use_unet_lora: True
-   # LoRA dropout. This sets the probability of randomly zeroing out elements, which helps prevent overfitting.
-   # See: https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html
-   lora_unet_dropout: 0.1
-   # The only time you want this off is if you're doing full LoRA training.
-   save_pretrained_model: False
-   # Learning rate for AdamW.
-   learning_rate: 5e-4
-   # Weight decay. Higher = more regularization; lower = closer to the dataset.
-   adam_weight_decay: 1e-2
-   # Maximum number of train steps. The model is saved after training.
-   max_train_steps: 300
-   # Saves a model every nth step.
-   checkpointing_steps: 50
-   # How many steps to do for validation if sample_preview is enabled.
-   validation_steps: 50
-   # Whether or not to use mixed precision with accelerate.
-   mixed_precision: "fp16"
-   # Trades VRAM usage for speed. You lose roughly 20% of training speed but save a lot of VRAM.
-   # If you need to save more VRAM, it can also be enabled for the text encoder, but that roughly halves speed again.
-   gradient_checkpointing: True
-   image_encoder_gradient_checkpointing: True
-
-   train_data:
-     # The width and height that your training data will be resized to.
-     width: 896
-     height: 512
-     # This will find the closest aspect ratio to your input width and height.
-     # For example, a 512x512 target with a 1280x720 video will be resized to 512x256.
-     use_data_aug: ~ #"controlnet"
-     pad_to_fit: false
-
-   validation_data:
-     # Whether or not to sample a preview during training (requires more VRAM).
-     sample_preview: True
-     # The number of frames to sample during validation.
-     num_frames: 14
-     # Height and width of validation samples.
-     width: 1024
-     height: 576
-     pad_to_fit: false
-     # Scale of spatial LoRAs; default is 0.
-     spatial_scale: 0
-     # Scale of the noise prior, i.e. the scale of the inversion noises.
-     noise_prior:
-       #- 0.0
-       - 1.0
-
- sarp_params:
-   sarp_noise_scale: 0.005
-
- attention_matching_params:
-   best_checkpoint_index: 250
-   lora_scale: 1.0
-   # LoRA path
-   lora_dir: "./cache/item1/train_motion_lora"
-   max_guidance_scale: 2.0
-
-   disk_store: True
-   load_attention_store: "./cache/item1/attention_store"
-   load_consistency_attention_store: "./cache/item1/consistency_attention_store"
-   registered_modules:
-     BasicTransformerBlock:
-       - "attn1"
-       #- "attn2"
-     TemporalBasicTransformerBlock:
-       - "attn1"
-       #- "attn2"
-   control_mode:
-     spatial_self: "masked_copy"
-     temporal_self: "copy_v2"
-   cross_replace_steps: 0.0
-   temporal_self_replace_steps: 1.0
-   spatial_self_replace_steps: 1.0
-   spatial_attention_chunk_size: 1
-
-   params:
-     edit0:
-       temporal_step_thr: [0.5, 0.8]
-       mask_thr: [0.35, 0.35]
-     edit1:
-       temporal_step_thr: [0.5, 0.8]
-       mask_thr: [0.35, 0.35]
-     edit2:
-       temporal_step_thr: [0.8, 0.9]
-       mask_thr: [0.35, 0.35]
-
- long_video_params:
-   mode: "skip-interval"
-   registered_modules:
-     BasicTransformerBlock:
-       #- "attn1"
-       #- "attn2"
-     TemporalBasicTransformerBlock:
-       - "attn1"
-       #- "attn2"
-
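For reference, a minimal sketch (an assumption, not part of this commit) of how a nested YAML config like the one removed above could be loaded in Python with OmegaConf; the path is illustrative, since this commit deletes the file from the repo:

    from omegaconf import OmegaConf

    # Illustrative path; point this at your own copy of the config.
    config = OmegaConf.load("item1/config_multi_chunks.yaml")

    # Nested sections are plain attribute/key lookups.
    print(config.pretrained_model_path)               # "ckpts/stable-video-diffusion-img2vid"
    print(config.data_params.chunk_size)              # 11
    print(config.attention_matching_params.lora_dir)  # "./cache/item1/train_motion_lora"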