---
pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
output_dir: "./outputs"
num_steps: 25
seed: 23
enable_xformers_memory_efficient_attention: true
enable_torch_2_attn: true
dtype: fp16
use_sarp: true
use_motion_lora: true
train_motion_lora_only: false
retrain_motion_lora: false
use_inversed_latents: true
use_attention_matching: true
use_consistency_attention_control: true
save_last_frames: true
load_from_last_frames_latents: null
visualize_attention_store: false
visualize_attention_store_steps: null
data_params:
video_path: "../datasets/svdedit/item10/source.mp4"
keyframe_paths:
- "../datasets/svdedit/item10/porsche.png"
start_t: 0
end_t: -1
sample_fps: 10
chunk_size: 14
overlay_size: 1
normalize: true
output_fps: 10
save_sampled_frame: true
output_res: [576, 1024]
pad_to_fit: false
begin_clip_id: 0
end_clip_id: 2
train_motion_lora_params:
cache_latents: true
cached_latent_dir: null
lora_rank: 32
use_unet_lora: true
lora_unet_dropout: 0.1
save_pretrained_model: false
learning_rate: 0.0005
adam_weight_decay: 0.01
max_train_steps: 250
checkpointing_steps: 250
validation_steps: 300
mixed_precision: fp16
gradient_checkpointing: true
image_encoder_gradient_checkpointing: true
train_data:
width: 896
height: 512
use_data_aug: null
pad_to_fit: false
validation_data:
sample_preview: true
num_frames: 14
width: 1024
height: 576
pad_to_fit: false
spatial_scale: 0
noise_prior:
- 1.0
sarp_params:
sarp_noise_scale: 0.005
attention_matching_params:
best_checkpoint_index: 250
lora_scale: 1.0
lora_dir: "./cache/item10/train_motion_lora"
disk_store: true
load_attention_store: "./cache/item10/attention_store"
load_consistency_attention_store: "./cache/item10/consistency_attention_store"
registered_modules:
BasicTransformerBlock:
- "attn1"
TemporalBasicTransformerBlock:
- "attn1"
control_mode:
spatial_self: "masked_copy"
temporal_self: "copy_v2"
cross_replace_steps: 0.0
temporal_self_replace_steps: 1.0
spatial_self_replace_steps: 1.0
spatial_attention_chunk_size: 1
params:
edit0:
temporal_step_thr: [0.5, 0.8]
mask_thr: [0.35, 0.35]
long_video_params:
mode: "skip-interval"
registered_modules:
BasicTransformerBlock: null
TemporalBasicTransformerBlock:
- "attn1"