Update to inpaint diffuser
- Realistic_Vision_V1.3-inpainting.safetensors +0 -3
- Realistic_Vision_V1.3-inpainting.yaml +0 -70
- model_index.json +1 -1
- scheduler/scheduler_config.json +1 -1
- text_encoder/config.json +1 -1
- text_encoder/pytorch_model.bin +1 -1
- unet/config.json +8 -2
- unet/diffusion_pytorch_model.bin +2 -2
- vae/config.json +1 -1
Realistic_Vision_V1.3-inpainting.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:10642fd1d25e2ccce0f182aec31174429ead44d855f52caaf6af720268846300
-size 4265203902
Realistic_Vision_V1.3-inpainting.yaml
DELETED
@@ -1,70 +0,0 @@
-model:
-  base_learning_rate: 7.5e-05
-  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false  # Note: different from the one we trained before
-    conditioning_key: hybrid  # important
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    finetune_keys: null
-
-    scheduler_config: # 10000 warmup steps
-      target: ldm.lr_scheduler.LambdaLinearScheduler
-      params:
-        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
-        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
-        f_start: [ 1.e-6 ]
-        f_max: [ 1. ]
-        f_min: [ 1. ]
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        image_size: 32 # unused
-        in_channels: 9 # 4 data + 4 downscaled image + 1 mask
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 768
-        use_checkpoint: True
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
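Note: the two deleted files above are the single-file LDM distribution (the .safetensors checkpoint plus the LatentInpaintDiffusion training config); this commit removes them and updates the diffusers folder layout in the remaining files. For reference, newer diffusers releases (roughly 0.21 and later) can still load such a single-file checkpoint directly. A minimal sketch, assuming a local copy of the deleted checkpoint; keyword arguments may differ between diffusers versions:

import torch
from diffusers import StableDiffusionInpaintPipeline

# Load the single-file LDM inpainting checkpoint (a local copy of the file
# deleted in this commit). Some diffusers versions also accept the deleted
# YAML as the original config; check from_single_file's docs for the kwarg.
pipe = StableDiffusionInpaintPipeline.from_single_file(
    "Realistic_Vision_V1.3-inpainting.safetensors",
    torch_dtype=torch.float16,
)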
model_index.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "StableDiffusionPipeline",
-  "_diffusers_version": "0.13.
+  "_diffusers_version": "0.13.1",
   "requires_safety_checker": false,
   "scheduler": [
     "diffusers",
scheduler/scheduler_config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "PNDMScheduler",
-  "_diffusers_version": "0.13.
+  "_diffusers_version": "0.13.1",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
text_encoder/config.json
CHANGED
@@ -20,6 +20,6 @@
   "pad_token_id": 1,
   "projection_dim": 768,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.26.1",
   "vocab_size": 49408
 }
text_encoder/pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:685f4a79ed75221910f1b8ba04beabbb5770e6b315e4ca76b4d32a241404c295
 size 492307041
unet/config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.13.
+  "_diffusers_version": "0.13.1",
   "act_fn": "silu",
   "attention_head_dim": 8,
   "block_out_channels": [
@@ -11,6 +11,8 @@
   ],
   "center_input_sample": false,
   "class_embed_type": null,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
   "cross_attention_dim": 768,
   "down_block_types": [
     "CrossAttnDownBlock2D",
@@ -22,7 +24,7 @@
   "dual_cross_attention": false,
   "flip_sin_to_cos": true,
   "freq_shift": 0,
-  "in_channels": 
+  "in_channels": 9,
   "layers_per_block": 2,
   "mid_block_scale_factor": 1,
   "mid_block_type": "UNetMidBlock2DCrossAttn",
@@ -31,8 +33,12 @@
   "num_class_embeds": null,
   "only_cross_attention": false,
   "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
   "resnet_time_scale_shift": "default",
   "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
   "up_block_types": [
     "UpBlock2D",
     "CrossAttnUpBlock2D",
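Note: the functional change in this file is "in_channels": 9, which matches the deleted YAML's comment (4 latent channels + 4 masked-image latent channels + 1 mask channel); the other added keys appear to be defaults serialized by diffusers 0.13.1. A minimal sketch of how a 9-channel inpainting UNet input is assembled (channel order as in diffusers' StableDiffusionInpaintPipeline; shapes are illustrative for a 512x512 image):

import torch

# 9 = 4 (noisy latents) + 1 (downscaled mask) + 4 (VAE latents of the masked image)
noisy_latents = torch.randn(1, 4, 64, 64)          # "data" channels
mask = torch.rand(1, 1, 64, 64)                    # mask resized to latent resolution
masked_image_latents = torch.randn(1, 4, 64, 64)   # encode(image * (1 - mask))

# Concatenated along the channel dimension before every UNet call.
unet_input = torch.cat([noisy_latents, mask, masked_image_latents], dim=1)
assert unet_input.shape[1] == 9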
unet/diffusion_pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
+oid sha256:1eddb51aa144f2d93092eda46985d01983a8ed2578d341c610f9ade6d346c267
+size 3438423973
vae/config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.13.
+  "_diffusers_version": "0.13.1",
   "act_fn": "silu",
   "block_out_channels": [
     128,
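Note: with the updated folder layout, the repo can be used for inpainting through diffusers. A minimal usage sketch; the repo id and image paths are placeholders, and the inpainting pipeline class is requested explicitly because model_index.json records StableDiffusionPipeline:

import torch
from diffusers import StableDiffusionInpaintPipeline
from PIL import Image

# Placeholder repo id; substitute the actual Hub repo or a local clone path.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V1.3-inpainting",
    torch_dtype=torch.float16,
).to("cuda")

# White (255) pixels in the mask are repainted; black pixels are kept.
init_image = Image.open("photo.png").convert("RGB").resize((512, 512))
mask_image = Image.open("mask.png").convert("L").resize((512, 512))

result = pipe(
    prompt="a cozy living room, photo, dslr, natural lighting",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=30,
).images[0]
result.save("inpainted.png")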