# Pretrained diffusers model path.
pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
# The folder where your training outputs will be placed.
output_dir: "./outputs"
seed: 23
num_steps: 25
# xformers must be installed for the best memory savings and performance (PyTorch < 2.0).
enable_xformers_memory_efficient_attention: true
# Use scaled dot product attention (only available with PyTorch >= 2.0).
enable_torch_2_attn: true

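# SARP noise injection; the noise scale is set under sarp_params below.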
use_sarp: true

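# Motion LoRA switches; training hyperparameters live under train_motion_lora_params,
# and the trained weights are loaded from attention_matching_params.lora_dir.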
use_motion_lora: true
train_motion_lora_only: false
retrain_motion_lora: false

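# Inversion and attention-control switches; the matching behavior itself is
# configured under attention_matching_params below.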
use_inversed_latents: true
use_attention_matching: true
use_consistency_attention_control: false
dtype: fp32

save_last_frames: true

# Source video, edited keyframes, and frame-sampling settings.
data_params:
  video_path: "../datasets/svdedit/item7/production_id_4811927.mp4"
  keyframe_paths:
    - "../datasets/svdedit/item7/golden_dress.png"
    - "../datasets/svdedit/item7/ice.png"
  start_t: 0
  end_t: 1
  sample_fps: 14
  chunk_size: 14
  overlay_size: 1
  normalize: true
  output_fps: 14
  save_sampled_frame: true
  output_res: [576, 1024]
  pad_to_fit: false

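# Hyperparameters for training the motion LoRA (used when use_motion_lora is true).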
train_motion_lora_params:
  cache_latents: true
  cached_latent_dir: null
  lora_rank: 32
  use_unet_lora: true
  lora_unet_dropout: 0.1
  save_pretrained_model: false
  learning_rate: 0.0005
  adam_weight_decay: 0.01
  max_train_steps: 300
  checkpointing_steps: 50
  validation_steps: 50
  mixed_precision: fp16
  gradient_checkpointing: true
  image_encoder_gradient_checkpointing: true
  train_data:
    width: 896
    height: 512
    use_data_aug: null
    pad_to_fit: false
  validation_data:
    sample_preview: true
    num_frames: 14
    width: 1024
    height: 576
    pad_to_fit: false
    spatial_scale: 0
    noise_prior:
    - 1.0

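# Noise scale for SARP (applies when use_sarp is true).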
sarp_params:
  sarp_noise_scale: 0.005

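# Controls which attention modules are hooked and how their attention maps
# are replaced during editing.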
attention_matching_params:
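  # Training step of the LoRA checkpoint to load; checkpoints are written every
  # checkpointing_steps (50) up to max_train_steps (300), per train_motion_lora_params.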
  best_checkpoint_index: 250
  lora_scale: 1.0
  lora_dir: "./cache/item7/train_motion_lora"
  max_guidance_scale: 2.5
  disk_store: true
  load_attention_store: "./cache/item7/attention_store/"
  registered_modules:
    BasicTransformerBlock:
    - "attn1"
    TemporalBasicTransformerBlock:
    - "attn1"
  control_mode:
    spatial_self: "masked_copy"
    temporal_self: "copy_v2"
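  # The replace-step values below are presumably fractions of the denoising steps
  # during which each attention type is replaced (0.0 = never, 1.0 = all steps).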
  cross_replace_steps: 0.0
  temporal_self_replace_steps: 1.0
  spatial_self_replace_steps: 1.0
  spatial_attention_chunk_size: 1

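  # Per-edit thresholds; edit0/edit1 presumably map to the two entries in
  # data_params.keyframe_paths, in order.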
  params:
    edit0:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]
    edit1:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]

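# Settings for videos longer than one chunk; note that only the temporal
# attention blocks are registered here.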
long_video_params:
  mode: skip-interval
  registered_modules:
    BasicTransformerBlock: null
    TemporalBasicTransformerBlock:
    - "attn1"