RamAnanth1 committed on
Commit
7093637
1 Parent(s): 777ee0f

Load Anything-v4 models

Browse files
Files changed (1) hide show
  1. model.py +4 -0
model.py CHANGED
@@ -67,18 +67,21 @@ class Model:
67
  config.model.params.cond_stage_config.params.device = device
68
 
69
  base_model_file = "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt"
 
70
  sketch_adapter_file = "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth"
71
  pose_adapter_file = "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth"
72
  pidinet_file = model_path+"table5_pidinet.pth"
73
  clip_file = "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/*"
74
 
75
  subprocess.run(shlex.split(f'wget {base_model_file} -O models/sd-v1-4.ckpt'))
 
76
  subprocess.run(shlex.split(f'wget {sketch_adapter_file} -O models/t2iadapter_sketch_sd14v1.pth'))
77
  subprocess.run(shlex.split(f'wget {pose_adapter_file} -O models/t2iadapter_keypose_sd14v1.pth'))
78
  subprocess.run(shlex.split(f'wget {pidinet_file} -O models/table5_pidinet.pth'))
79
 
80
 
81
  self.model = load_model_from_config(config, "models/sd-v1-4.ckpt").to(device)
 
82
  current_base = 'sd-v1-4.ckpt'
83
  self.model_ad_sketch = Adapter(channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False).to(device)
84
  self.model_ad_sketch.load_state_dict(torch.load("models/t2iadapter_sketch_sd14v1.pth"))
@@ -87,6 +90,7 @@ class Model:
87
  net_G.load_state_dict({k.replace('module.',''):v for k, v in ckp.items()})
88
  net_G.to(device)
89
  self.sampler= PLMSSampler(self.model)
 
90
  save_memory=True
91
 
92
  self.model_ad_pose = Adapter(cin=int(3*64),channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False).to(device)
 
67
  config.model.params.cond_stage_config.params.device = device
68
 
69
  base_model_file = "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt"
70
+ base_model_file_anything = "https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.0-pruned.ckpt"
71
  sketch_adapter_file = "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth"
72
  pose_adapter_file = "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth"
73
  pidinet_file = model_path+"table5_pidinet.pth"
74
  clip_file = "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/*"
75
 
76
  subprocess.run(shlex.split(f'wget {base_model_file} -O models/sd-v1-4.ckpt'))
77
+ subprocess.run(shlex.split(f'wget {base_model_file_anything} -O models/anything-v4.0-pruned.ckpt'))
78
  subprocess.run(shlex.split(f'wget {sketch_adapter_file} -O models/t2iadapter_sketch_sd14v1.pth'))
79
  subprocess.run(shlex.split(f'wget {pose_adapter_file} -O models/t2iadapter_keypose_sd14v1.pth'))
80
  subprocess.run(shlex.split(f'wget {pidinet_file} -O models/table5_pidinet.pth'))
81
 
82
 
83
  self.model = load_model_from_config(config, "models/sd-v1-4.ckpt").to(device)
84
+ self.model_anything = load_model_from_config(config, "models/anything-v4.0-pruned.ckpt").to(device)
85
  current_base = 'sd-v1-4.ckpt'
86
  self.model_ad_sketch = Adapter(channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False).to(device)
87
  self.model_ad_sketch.load_state_dict(torch.load("models/t2iadapter_sketch_sd14v1.pth"))
 
90
  net_G.load_state_dict({k.replace('module.',''):v for k, v in ckp.items()})
91
  net_G.to(device)
92
  self.sampler= PLMSSampler(self.model)
93
+ self.sampler_anything= PLMSSampler(self.model_anything)
94
  save_memory=True
95
 
96
  self.model_ad_pose = Adapter(cin=int(3*64),channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False).to(device)