End of training
- .gitattributes +2 -0
- README.md +83 -0
- config.json +57 -0
- diffusion_pytorch_model.safetensors +3 -0
- image_control.png +0 -0
- images_0.png +3 -0
- images_1.png +0 -0
- images_2.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+images_0.png filter=lfs diff=lfs merge=lfs -text
+images_2.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,83 @@
---
license: creativeml-openrail-m
base_model: runwayml/stable-diffusion-v1-5
tags:
- stable-diffusion
- stable-diffusion-diffusers
- image-to-image
- diffusers
- controlnet
- control-lora
---

# ControlLoRA - Head3d Version

ControlLoRA is a neural network structure, extended from ControlNet, that controls diffusion models by adding extra conditions. This checkpoint corresponds to the ControlLoRA conditioned on Head3d.

ControlLoRA uses the same structure as ControlNet, but its core weights come from the UNet and stay unmodified. Only the hint-image encoding layers and the linear and conv2d LoRA layers that produce the weight offsets are trained; a minimal sketch of this idea follows below.

The main idea comes from my [ControlLoRA](https://github.com/HighCWu/ControlLoRA) and the SDXL [control-lora](https://huggingface.co/stabilityai/control-lora).
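
The snippet below is a minimal, illustrative sketch of the weight-offset idea, not the repository's actual `ControlLoRAModel` code (which also covers conv2d layers and DoRA): the base weight borrowed from the UNet stays frozen, and only a low-rank offset is trained.

```py
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """A frozen base linear layer plus a trainable low-rank weight offset."""

    def __init__(self, base: nn.Linear, rank: int = 128):
        super().__init__()
        self.base = base
        self.base.requires_grad_(False)      # UNet weight: frozen, unmodified
        self.lora_down = nn.Linear(base.in_features, rank, bias=False)
        self.lora_up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.lora_up.weight)  # offset starts at zero

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Frozen path plus trainable low-rank offset.
        return self.base(x) + self.lora_up(self.lora_down(x))
```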

## Example

1. Clone ControlLoRA from [GitHub](https://github.com/HighCWu/control-lora-v2):
```sh
$ git clone https://github.com/HighCWu/control-lora-v2
```

2. Enter the repo dir:
```sh
$ cd control-lora-v2
```

3. Run the code:
```py
import torch
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, UNet2DConditionModel, UniPCMultistepScheduler
from models.control_lora import ControlLoRAModel

device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = torch.float16 if torch.cuda.is_available() else torch.float32

image = Image.open('<Your Conditioning Image Path>')

base_model = "runwayml/stable-diffusion-v1-5"

unet = UNet2DConditionModel.from_pretrained(
    base_model, subfolder="unet", torch_dtype=dtype
)
control_lora: ControlLoRAModel = ControlLoRAModel.from_pretrained(
    "HighCWu/sd-latent-control-dora-rank128-head3d", torch_dtype=dtype
)
control_lora.tie_weights(unet)

pipe = StableDiffusionControlNetPipeline.from_pretrained(
    base_model, unet=unet, controlnet=control_lora, safety_checker=None, torch_dtype=dtype
).to(device)
control_lora.bind_vae(pipe.vae)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Remove if you do not have xformers installed
# see https://huggingface.co/docs/diffusers/v0.13.0/en/optimization/xformers#installing-xformers
# for installation instructions
pipe.enable_xformers_memory_efficient_attention()

# pipe.enable_model_cpu_offload()

image = pipe("Girl smiling, professional dslr photograph, high quality", image, num_inference_steps=20).images[0]

image.show()
```

You can find some example images below.

prompt: a photography of a man with a beard and sunglasses on
![images_0](./images_0.png)

prompt: worst quality , low quality , portrait , close - up , inconsistent head shape
![images_1](./images_1.png)

prompt: a photography of a man with a mustache and a suit jacket
![images_2](./images_2.png)
config.json
ADDED
@@ -0,0 +1,57 @@
{
  "_class_name": "ControlLoRAModel",
  "_diffusers_version": "0.26.3",
  "_name_or_path": "output/sd-latent-control-dora-rank128-head3d\\checkpoint-75000",
  "act_fn": "silu",
  "addition_embed_type": null,
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": null,
  "attention_head_dim": 8,
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "class_embed_type": null,
  "conditioning_channels": 4,
  "conditioning_embedding_out_channels": [
    16,
    32,
    96,
    256
  ],
  "controlnet_conditioning_channel_order": "rgb",
  "cross_attention_dim": 768,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "global_pool_conditions": false,
  "in_channels": 4,
  "layers_per_block": 2,
  "lora_conv2d_rank": 128,
  "lora_linear_rank": 128,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "projection_class_embeddings_input_dim": null,
  "resnet_time_scale_shift": "default",
  "transformer_layers_per_block": 1,
  "upcast_attention": false,
  "use_conditioning_latent": true,
  "use_dora": true,
  "use_linear_projection": false,
  "use_same_level_conditioning_latent": false
}
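
The fields above pin down the trainable parts: `lora_linear_rank` and `lora_conv2d_rank` are both 128, and `use_dora` enables DoRA-style offsets. As a quick, illustrative way to inspect these settings locally (fetching the file via `huggingface_hub` is an assumption about your workflow, not part of this repo):

```py
import json
from huggingface_hub import hf_hub_download

# Fetch just the config file from the Hub and read the key settings.
path = hf_hub_download("HighCWu/sd-latent-control-dora-rank128-head3d", "config.json")
with open(path) as f:
    cfg = json.load(f)

print(cfg["lora_linear_rank"], cfg["lora_conv2d_rank"])  # 128 128
print(cfg["use_dora"])                 # True: DoRA-style weight offsets
print(cfg["use_conditioning_latent"])  # True: conditioning image apparently
                                       # goes through VAE latents, consistent
                                       # with bind_vae() in the example above
```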
diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d02974f69df2ec4ead727979f0cd6bf4bfa31abb7f0502fb71589a13b96807d
size 271963378
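
The LFS pointer records the expected digest and byte size of the real weights, so a downloaded copy can be checked against it. A small stdlib-only sketch (the local file path is an assumption):

```py
import hashlib
import os

path = "diffusion_pytorch_model.safetensors"

# Size check against the pointer's recorded byte count.
assert os.path.getsize(path) == 271963378

# Streaming sha256 so the large weights file never sits fully in memory.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == "2d02974f69df2ec4ead727979f0cd6bf4bfa31abb7f0502fb71589a13b96807d"
```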
image_control.png
ADDED
images_0.png
ADDED
images_1.png
ADDED
images_2.png
ADDED