Upload item1/config_single_chunk.yaml with huggingface_hub
item1/config_single_chunk.yaml CHANGED
@@ -1,7 +1,7 @@
 # Pretrained diffusers model path.
 pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
 # The folder where your training outputs will be placed.
-output_dir: "./
+output_dir: "./opensource"
 seed: 23
 num_steps: 25
 # Xformers must be installed for best memory savings and performance (< Pytorch 2.0)
@@ -28,11 +28,11 @@ load_from_last_frames_latents:
 
 # data_params
 data_params:
-  video_path: "../datasets/svdedit/item1/
-    - "../datasets/svdedit/item1/
-    - "../datasets/svdedit/item1/
-    - "../datasets/svdedit/item1/
+  video_path: "../datasets/svdedit/item1/source.mp4"
   keyframe_paths:
+    - "../datasets/svdedit/item1/necklace.png"
+    - "../datasets/svdedit/item1/red_hair.png"
+    - "../datasets/svdedit/item1/white_sculpture.png"
   start_t: 0
   end_t: 8.4
   sample_fps: 2.5
@@ -103,7 +103,7 @@ sarp_params:
   sarp_noise_scale: 0.005
 
 attention_matching_params:
-  best_checkpoint_index:
+  best_checkpoint_index: 300
   lora_scale: 1.0
   # lora path
   lora_dir: "./cache/item1/train_motion_lora"
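For reference, the uploaded config can be sanity-checked before launching a run. This is a minimal sketch, assuming PyYAML is installed and the script is executed from the directory containing item1/ (so the relative dataset paths resolve); the loader below is illustrative and not part of this repository:

from pathlib import Path
import yaml

# Load the config uploaded in this commit.
cfg = yaml.safe_load(Path("item1/config_single_chunk.yaml").read_text())

# Verify that the source video and keyframe images referenced in data_params exist.
data = cfg["data_params"]
inputs = [Path(data["video_path"]), *(Path(p) for p in data["keyframe_paths"])]
missing = [str(p) for p in inputs if not p.exists()]
if missing:
    raise FileNotFoundError(f"Missing data files: {missing}")

# Echo the fields this commit filled in.
print("output_dir:", cfg["output_dir"])
print("best_checkpoint_index:", cfg["attention_matching_params"]["best_checkpoint_index"])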