enable_conv = true # also apply the algorithm to convolution layers
# An example of using different algos/settings with the "full" preset
unet_target_module = [
"Transformer2DModel",
"ResnetBlock2D",
"Downsample2D",
"Upsample2D",
]
unet_target_name = [
".*time_embed\\..+", # timestep embedding
".*label_emb\\..+", # class/label embedding
".*input_blocks\\.0.+", # first input block (initial conv)
"^out\\..+", # final output layers
]
text_encoder_target_module = [
"CLIPAttention",
"CLIPMLP",
]
text_encoder_target_name = [
# "token_embedding", # not supported, Embedding module in CLIP
]
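# module_algo_map assigns a specific algorithm and hyperparameters to each
# module class listed below; classes not listed are expected to fall back to
# the globally configured algo/dim/alpha.
# The very large dim/alpha (10000) used with "lokr" below is a common way to
# let LoKr effectively run at full dimension, with "factor" controlling the
# size of the Kronecker factorization.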
[module_algo_map]
[module_algo_map.CrossAttention] # Attention Layer in UNet
algo = "lokr"
dim = 10000
alpha = 10000
factor = 4
[module_algo_map.FeedForward] # MLP Layer in UNet
algo = "lokr"
dim = 10000
alpha = 10000
factor = 2
[module_algo_map.CLIPAttention] # Attention Layer in TE
algo = "lokr"
dim = 10000
alpha = 10000
factor = 8
[module_algo_map.CLIPMLP] # MLP Layer in TE
algo = "lokr"
dim = 10000
alpha = 10000
factor = 8
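
# Usage sketch (an assumption, not part of the example itself): with LyCORIS as
# the network module in kohya-ss sd-scripts, a custom preset file like this one
# is typically passed through network_args, e.g.
#   --network_module lycoris.kohya --network_args "preset=/path/to/this.toml" "algo=lora"
# See the LyCORIS preset documentation for the exact supported keys.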