import argparse
import os

import torch
from safetensors import safe_open
from safetensors.torch import load_file, save_file
from tqdm import tqdm

from library.utils import setup_logging

setup_logging()
import logging

logger = logging.getLogger(__name__)


def is_unet_key(key):
    # VAE or TextEncoder, the last one is for SDXL
    return not ("first_stage_model" in key or "cond_stage_model" in key or "conditioner." in key)


TEXT_ENCODER_KEY_REPLACEMENTS = [
    ("cond_stage_model.transformer.embeddings.", "cond_stage_model.transformer.text_model.embeddings."),
    ("cond_stage_model.transformer.encoder.", "cond_stage_model.transformer.text_model.encoder."),
    ("cond_stage_model.transformer.final_layer_norm.", "cond_stage_model.transformer.text_model.final_layer_norm."),
]


# support for models with different text encoder keys
def replace_text_encoder_key(key):
    for rep_from, rep_to in TEXT_ENCODER_KEY_REPLACEMENTS:
        if key.startswith(rep_from):
            return True, rep_to + key[len(rep_from) :]
    return False, key
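

# Illustrative example of the remapping above (hypothetical key, shown for documentation only):
#   replace_text_encoder_key("cond_stage_model.transformer.encoder.layers.0.mlp.fc1.weight")
#   -> (True, "cond_stage_model.transformer.text_model.encoder.layers.0.mlp.fc1.weight")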


def merge(args):
    if args.precision == "fp16":
        dtype = torch.float16
    elif args.precision == "bf16":
        dtype = torch.bfloat16
    else:
        dtype = torch.float

    if args.saving_precision == "fp16":
        save_dtype = torch.float16
    elif args.saving_precision == "bf16":
        save_dtype = torch.bfloat16
    else:
        save_dtype = torch.float

    # check if all models are safetensors
    for model in args.models:
        if not model.endswith("safetensors"):
            logger.info(f"Model {model} is not a safetensors model")
            exit()
        if not os.path.isfile(model):
            logger.info(f"Model {model} does not exist")
            exit()

    assert args.ratios is None or len(args.models) == len(args.ratios), "ratios must be the same length as models"

    # load and merge
    ratio = 1.0 / len(args.models)  # default
    supplementary_key_ratios = {}  # [key] = ratio, for keys not in all models, add later

    merged_sd = None
    first_model_keys = set()  # check missing keys in other models
    for i, model in enumerate(args.models):
        if args.ratios is not None:
            ratio = args.ratios[i]

        if merged_sd is None:
            # load first model
            logger.info(f"Loading model {model}, ratio = {ratio}...")
            merged_sd = {}
            with safe_open(model, framework="pt", device=args.device) as f:
                for key in tqdm(f.keys()):
                    value = f.get_tensor(key)
                    _, key = replace_text_encoder_key(key)

                    first_model_keys.add(key)

                    if not is_unet_key(key) and args.unet_only:
                        supplementary_key_ratios[key] = 1.0  # use first model's value for VAE or TextEncoder
                        continue

                    value = ratio * value.to(dtype)  # first model's value * ratio
                    merged_sd[key] = value

            logger.info(f"Model has {len(merged_sd)} keys " + ("(UNet only)" if args.unet_only else ""))
            continue

        # load other models
        logger.info(f"Loading model {model}, ratio = {ratio}...")

        with safe_open(model, framework="pt", device=args.device) as f:
            model_keys = f.keys()
            for key in tqdm(model_keys):
                _, new_key = replace_text_encoder_key(key)
                if new_key not in merged_sd:
                    if args.show_skipped and new_key not in first_model_keys:
                        logger.info(f"Skip: {new_key}")
                    continue

                value = f.get_tensor(key)
                merged_sd[new_key] = merged_sd[new_key] + ratio * value.to(dtype)

            # enumerate keys not in this model
            model_keys = set(model_keys)
            for key in merged_sd.keys():
                if key in model_keys:
                    continue
                logger.warning(f"Key {key} not in model {model}, use first model's value")

                if key in supplementary_key_ratios:
                    supplementary_key_ratios[key] += ratio
                else:
                    supplementary_key_ratios[key] = ratio

    # add supplementary keys' value (including VAE and TextEncoder)
    if len(supplementary_key_ratios) > 0:
        logger.info("add first model's value")
        with safe_open(args.models[0], framework="pt", device=args.device) as f:
            for key in tqdm(f.keys()):
                _, new_key = replace_text_encoder_key(key)
                if new_key not in supplementary_key_ratios:
                    continue

                if is_unet_key(new_key):  # not VAE or TextEncoder
                    logger.warning(f"Key {new_key} not in all models, ratio = {supplementary_key_ratios[new_key]}")

                value = f.get_tensor(key)  # original key
                if new_key not in merged_sd:
                    merged_sd[new_key] = supplementary_key_ratios[new_key] * value.to(dtype)
                else:
                    merged_sd[new_key] = merged_sd[new_key] + supplementary_key_ratios[new_key] * value.to(dtype)

    # save
    output_file = args.output
    if not output_file.endswith(".safetensors"):
        output_file = output_file + ".safetensors"

    logger.info(f"Saving to {output_file}...")

    # convert to save_dtype
    for k in merged_sd.keys():
        merged_sd[k] = merged_sd[k].to(save_dtype)

    save_file(merged_sd, output_file)

    logger.info("Done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Merge models")
    parser.add_argument("--models", nargs="+", type=str, help="Models to merge")
    parser.add_argument("--output", type=str, help="Output model")
    parser.add_argument("--ratios", nargs="+", type=float, help="Ratios of models, default is equal, total = 1.0")
    parser.add_argument("--unet_only", action="store_true", help="Only merge unet")
    parser.add_argument("--device", type=str, default="cpu", help="Device to use, default is cpu")
    parser.add_argument(
        "--precision", type=str, default="float", choices=["float", "fp16", "bf16"], help="Calculation precision, default is float"
    )
    parser.add_argument(
        "--saving_precision",
        type=str,
        default="float",
        choices=["float", "fp16", "bf16"],
        help="Saving precision, default is float",
    )
    parser.add_argument("--show_skipped", action="store_true", help="Show skipped keys (keys not in first model)")

    args = parser.parse_args()
    merge(args)
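
# Example invocation (illustrative script name, file names and ratios, not taken from the source):
#   python merge_models.py --models modelA.safetensors modelB.safetensors \
#       --ratios 0.7 0.3 --output merged --precision fp16 --saving_precision fp16
# Omitting --ratios weights every model equally (1 / number of models).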