Upload Animistatics to Hugging Face.
- .gitattributes +3 -0
- Animistatics-fp16.ckpt +3 -0
- Animistatics-fp16.safetensors +3 -0
- Animistatics-fp32.ckpt +3 -0
- Animistatics-fp32.safetensors +3 -0
- Animistatics.ckpt +3 -0
- Animistatics.vae.pt +3 -0
- Animistatics.yaml +70 -0
- README.md +83 -0
- assets/boy.png +3 -0
- assets/city.png +3 -0
- assets/girl.png +3 -0
- assets/header.png +0 -0
- feature_extractor/preprocessor_config.json +28 -0
- model_index.json +33 -0
- safety_checker/config.json +181 -0
- safety_checker/pytorch_model.bin +3 -0
- scheduler/scheduler_config.json +14 -0
- text_encoder/config.json +25 -0
- text_encoder/pytorch_model.bin +3 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +24 -0
- tokenizer/tokenizer_config.json +34 -0
- tokenizer/vocab.json +0 -0
- unet/config.json +44 -0
- unet/diffusion_pytorch_model.bin +3 -0
- vae/config.json +30 -0
- vae/diffusion_pytorch_model.bin +3 -0
.gitattributes
CHANGED
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/boy.png filter=lfs diff=lfs merge=lfs -text
+assets/city.png filter=lfs diff=lfs merge=lfs -text
+assets/girl.png filter=lfs diff=lfs merge=lfs -text
Animistatics-fp16.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c93153573f0a07ce0b7bd84ec1cd0fc5c9ef9682f723c3356406d6c02a12f5fa
+size 2132874321
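Each of the weight files in this commit is stored with Git LFS, so the diff shows only a three-line pointer (spec version, sha256 oid, and byte size) rather than the binary itself. As a minimal illustration (the helper names and the `.pointer` path are hypothetical, not part of this repo), a pointer file can be parsed and a downloaded blob verified against it like this:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded file against the oid/size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    blob = Path(blob_path).read_bytes()
    return (len(blob) == int(fields["size"])
            and hashlib.sha256(blob).hexdigest() == expected_oid)

# e.g. verify_blob("Animistatics-fp16.ckpt.pointer", "Animistatics-fp16.ckpt")
```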
Animistatics-fp16.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89e8d8e7199bb5dc850deac4f102dc5c6b383e7c02de3d95a40b93efe165a2ba
+size 2132625431
Animistatics-fp32.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:779d6586784da8a7a5efdf4960102c2e57d3d7cc93ddbfd10204a15b593e3adf
+size 4265344913
Animistatics-fp32.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29bd32be9d2a20079d6f7c71e70358e967e5621229daa76c7fe9ec029e7e6631
+size 4265096535
Animistatics.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c31a3f94c0c206597e2e72c480104866755beba54dfc814bf74427f805e65951
+size 4265339120
Animistatics.vae.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f921fb3f29891d2a77a6571e56b8b5052420d2884129517a333c60b1b4816cdf
+size 822802803
Animistatics.yaml
ADDED
@@ -0,0 +1,70 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 512
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
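This YAML is the standard Stable Diffusion v1 inference config: a LatentDiffusion model with a frozen CLIP text encoder (`context_dim: 768`), a 4-channel latent UNet, and a KL autoencoder. A minimal sketch of how the original CompVis codebase consumes it, assuming the stable-diffusion repo (which provides the `ldm` package) and `omegaconf` are installed:

```python
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config  # from the CompVis stable-diffusion repo

# Build the LatentDiffusion model from the config, then load the checkpoint weights.
config = OmegaConf.load("Animistatics.yaml")
model = instantiate_from_config(config.model)

state_dict = torch.load("Animistatics.ckpt", map_location="cpu")["state_dict"]
model.load_state_dict(state_dict, strict=False)
model.eval()
```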
README.md
CHANGED
@@ -1,3 +1,86 @@
 ---
+language:
+- en
 license: creativeml-openrail-m
+tags:
+- stable-diffusion
+- stable-diffusion-diffusers
+- text-to-image
+- diffusers
+inference: true
 ---
+
+![Header](https://huggingface.co/Maseshi/Animistatics/resolve/main/assets/header.png)
+
+# Animistatics
+
+Welcome to Animistatics, a latent diffusion model for weebs. This model is intended to produce high-quality, highly detailed anime-style images from just a few prompts. Like other anime-style Stable Diffusion models, it also supports Danbooru tags for generating images.
+
+e.g. **_girl, cafe, plants, coffee, lighting, steam, blue eyes, brown hair_**
+
+## Gradio
+
+We provide a [Gradio](https://github.com/gradio-app/gradio) web UI to run Animistatics:
+
+[![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/Maseshi/Animistatics)
+
+## Google Colab
+
+We also provide a [Google Colab](https://github.com/gradio-app/gradio) notebook to run Animistatics:
+
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Qf7KGx7wCQ6XCs4ai_2riq68ip7mZw_t?usp=sharing)
+
+## 🧨 Diffusers
+
+This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
+
+You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().
+
+```python
+from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+import torch
+
+repo_id = "Maseshi/Animistatics"
+pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
+
+pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+pipe = pipe.to("cuda")
+
+prompt = "girl, cafe, plants, coffee, lighting, steam, blue eyes, brown hair"
+image = pipe(prompt, num_inference_steps=25).images[0]
+image.save("girl.png")
+```
+
+## Examples
+
+Below are some examples of images generated using this model:
+
+**Anime Girl:**
+![Anime Girl](https://huggingface.co/Maseshi/Animistatics/resolve/main/assets/girl.png)
+```
+girl, cafe, plants, coffee, lighting, steam, blue eyes, brown hair
+Steps: 50, Sampler: DDIM, CFG scale: 12
+```
+**Anime Boy:**
+![Anime Boy](https://huggingface.co/Maseshi/Animistatics/resolve/main/assets/boy.png)
+```
+boy, blonde hair, blue eyes, colorful, cumulonimbus clouds, lighting, medium hair, plants, city, hoodie, cool
+Steps: 50, Sampler: DDIM, CFG scale: 12
+```
+**City:**
+![City](https://huggingface.co/Maseshi/Animistatics/resolve/main/assets/city.png)
+```
+cityscape, concept art, sun shining through clouds, crepuscular rays, trending on art station, 8k
+Steps: 50, Sampler: DDIM, CFG scale: 12
+```
+
+## License
+
+This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
+The CreativeML OpenRAIL License specifies:
+
+1. You can't use the model to deliberately produce or share illegal or harmful outputs or content.
+2. The authors claim no rights on the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in the license.
+3. You may redistribute the weights and use the model commercially and/or as a service. If you do, please be aware that you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully).
+
+[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
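Beyond the quickstart in the README, note that this commit ships the VAE twice: as a standalone `Animistatics.vae.pt` for ckpt-based UIs, and as the diffusers `vae/` subfolder. A hedged sketch of loading the repo's VAE explicitly and handing it to the pipeline (standard diffusers API, not a method documented in this model card):

```python
import torch
from diffusers import AutoencoderKL, DiffusionPipeline

repo_id = "Maseshi/Animistatics"

# Load the repo's own VAE from its subfolder, then pass it to the pipeline.
vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(repo_id, vae=vae, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
```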
assets/boy.png
ADDED
Git LFS Details

assets/city.png
ADDED
Git LFS Details

assets/girl.png
ADDED
Git LFS Details

assets/header.png
ADDED
feature_extractor/preprocessor_config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPFeatureExtractor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
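This config describes the CLIP image preprocessing used to feed the safety checker: resize the shortest edge to 224, center-crop to 224x224, rescale by 1/255, and normalize with the CLIP mean/std above. A small sketch (the image path is just an example):

```python
from PIL import Image
from transformers import CLIPFeatureExtractor

feature_extractor = CLIPFeatureExtractor.from_pretrained(
    "Maseshi/Animistatics", subfolder="feature_extractor"
)
image = Image.open("girl.png")  # any generated image
inputs = feature_extractor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```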
model_index.json
ADDED
@@ -0,0 +1,33 @@
+{
+  "_class_name": "StableDiffusionPipeline",
+  "_diffusers_version": "0.12.0.dev0",
+  "feature_extractor": [
+    "transformers",
+    "CLIPFeatureExtractor"
+  ],
+  "requires_safety_checker": true,
+  "safety_checker": [
+    "stable_diffusion",
+    "StableDiffusionSafetyChecker"
+  ],
+  "scheduler": [
+    "diffusers",
+    "PNDMScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
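`model_index.json` tells diffusers which library and class implement each pipeline component, and which subfolder each loads from. Equivalently, the components can be loaded individually and assembled by hand; a sketch using standard diffusers/transformers APIs:

```python
from diffusers import (AutoencoderKL, PNDMScheduler,
                       StableDiffusionPipeline, UNet2DConditionModel)
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

repo_id = "Maseshi/Animistatics"

# Each subfolder name matches a key in model_index.json.
pipe = StableDiffusionPipeline(
    vae=AutoencoderKL.from_pretrained(repo_id, subfolder="vae"),
    text_encoder=CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder"),
    tokenizer=CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer"),
    unet=UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet"),
    scheduler=PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler"),
    safety_checker=StableDiffusionSafetyChecker.from_pretrained(
        repo_id, subfolder="safety_checker"
    ),
    feature_extractor=CLIPFeatureExtractor.from_pretrained(
        repo_id, subfolder="feature_extractor"
    ),
)
```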
safety_checker/config.json
ADDED
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_commit_hash": "cb41f3a270d63d454d385fc2e4f571c487c253c5",
|
3 |
+
"_name_or_path": "CompVis/stable-diffusion-safety-checker",
|
4 |
+
"architectures": [
|
5 |
+
"StableDiffusionSafetyChecker"
|
6 |
+
],
|
7 |
+
"initializer_factor": 1.0,
|
8 |
+
"logit_scale_init_value": 2.6592,
|
9 |
+
"model_type": "clip",
|
10 |
+
"projection_dim": 768,
|
11 |
+
"text_config": {
|
12 |
+
"_name_or_path": "",
|
13 |
+
"add_cross_attention": false,
|
14 |
+
"architectures": null,
|
15 |
+
"attention_dropout": 0.0,
|
16 |
+
"bad_words_ids": null,
|
17 |
+
"begin_suppress_tokens": null,
|
18 |
+
"bos_token_id": 0,
|
19 |
+
"chunk_size_feed_forward": 0,
|
20 |
+
"cross_attention_hidden_size": null,
|
21 |
+
"decoder_start_token_id": null,
|
22 |
+
"diversity_penalty": 0.0,
|
23 |
+
"do_sample": false,
|
24 |
+
"dropout": 0.0,
|
25 |
+
"early_stopping": false,
|
26 |
+
"encoder_no_repeat_ngram_size": 0,
|
27 |
+
"eos_token_id": 2,
|
28 |
+
"exponential_decay_length_penalty": null,
|
29 |
+
"finetuning_task": null,
|
30 |
+
"forced_bos_token_id": null,
|
31 |
+
"forced_eos_token_id": null,
|
32 |
+
"hidden_act": "quick_gelu",
|
33 |
+
"hidden_size": 768,
|
34 |
+
"id2label": {
|
35 |
+
"0": "LABEL_0",
|
36 |
+
"1": "LABEL_1"
|
37 |
+
},
|
38 |
+
"initializer_factor": 1.0,
|
39 |
+
"initializer_range": 0.02,
|
40 |
+
"intermediate_size": 3072,
|
41 |
+
"is_decoder": false,
|
42 |
+
"is_encoder_decoder": false,
|
43 |
+
"label2id": {
|
44 |
+
"LABEL_0": 0,
|
45 |
+
"LABEL_1": 1
|
46 |
+
},
|
47 |
+
"layer_norm_eps": 1e-05,
|
48 |
+
"length_penalty": 1.0,
|
49 |
+
"max_length": 20,
|
50 |
+
"max_position_embeddings": 77,
|
51 |
+
"min_length": 0,
|
52 |
+
"model_type": "clip_text_model",
|
53 |
+
"no_repeat_ngram_size": 0,
|
54 |
+
"num_attention_heads": 12,
|
55 |
+
"num_beam_groups": 1,
|
56 |
+
"num_beams": 1,
|
57 |
+
"num_hidden_layers": 12,
|
58 |
+
"num_return_sequences": 1,
|
59 |
+
"output_attentions": false,
|
60 |
+
"output_hidden_states": false,
|
61 |
+
"output_scores": false,
|
62 |
+
"pad_token_id": 1,
|
63 |
+
"prefix": null,
|
64 |
+
"problem_type": null,
|
65 |
+
"projection_dim": 512,
|
66 |
+
"pruned_heads": {},
|
67 |
+
"remove_invalid_values": false,
|
68 |
+
"repetition_penalty": 1.0,
|
69 |
+
"return_dict": true,
|
70 |
+
"return_dict_in_generate": false,
|
71 |
+
"sep_token_id": null,
|
72 |
+
"suppress_tokens": null,
|
73 |
+
"task_specific_params": null,
|
74 |
+
"temperature": 1.0,
|
75 |
+
"tf_legacy_loss": false,
|
76 |
+
"tie_encoder_decoder": false,
|
77 |
+
"tie_word_embeddings": true,
|
78 |
+
"tokenizer_class": null,
|
79 |
+
"top_k": 50,
|
80 |
+
"top_p": 1.0,
|
81 |
+
"torch_dtype": null,
|
82 |
+
"torchscript": false,
|
83 |
+
"transformers_version": "4.26.0.dev0",
|
84 |
+
"typical_p": 1.0,
|
85 |
+
"use_bfloat16": false,
|
86 |
+
"vocab_size": 49408
|
87 |
+
},
|
88 |
+
"text_config_dict": {
|
89 |
+
"hidden_size": 768,
|
90 |
+
"intermediate_size": 3072,
|
91 |
+
"num_attention_heads": 12,
|
92 |
+
"num_hidden_layers": 12
|
93 |
+
},
|
94 |
+
"torch_dtype": "float32",
|
95 |
+
"transformers_version": null,
|
96 |
+
"vision_config": {
|
97 |
+
"_name_or_path": "",
|
98 |
+
"add_cross_attention": false,
|
99 |
+
"architectures": null,
|
100 |
+
"attention_dropout": 0.0,
|
101 |
+
"bad_words_ids": null,
|
102 |
+
"begin_suppress_tokens": null,
|
103 |
+
"bos_token_id": null,
|
104 |
+
"chunk_size_feed_forward": 0,
|
105 |
+
"cross_attention_hidden_size": null,
|
106 |
+
"decoder_start_token_id": null,
|
107 |
+
"diversity_penalty": 0.0,
|
108 |
+
"do_sample": false,
|
109 |
+
"dropout": 0.0,
|
110 |
+
"early_stopping": false,
|
111 |
+
"encoder_no_repeat_ngram_size": 0,
|
112 |
+
"eos_token_id": null,
|
113 |
+
"exponential_decay_length_penalty": null,
|
114 |
+
"finetuning_task": null,
|
115 |
+
"forced_bos_token_id": null,
|
116 |
+
"forced_eos_token_id": null,
|
117 |
+
"hidden_act": "quick_gelu",
|
118 |
+
"hidden_size": 1024,
|
119 |
+
"id2label": {
|
120 |
+
"0": "LABEL_0",
|
121 |
+
"1": "LABEL_1"
|
122 |
+
},
|
123 |
+
"image_size": 224,
|
124 |
+
"initializer_factor": 1.0,
|
125 |
+
"initializer_range": 0.02,
|
126 |
+
"intermediate_size": 4096,
|
127 |
+
"is_decoder": false,
|
128 |
+
"is_encoder_decoder": false,
|
129 |
+
"label2id": {
|
130 |
+
"LABEL_0": 0,
|
131 |
+
"LABEL_1": 1
|
132 |
+
},
|
133 |
+
"layer_norm_eps": 1e-05,
|
134 |
+
"length_penalty": 1.0,
|
135 |
+
"max_length": 20,
|
136 |
+
"min_length": 0,
|
137 |
+
"model_type": "clip_vision_model",
|
138 |
+
"no_repeat_ngram_size": 0,
|
139 |
+
"num_attention_heads": 16,
|
140 |
+
"num_beam_groups": 1,
|
141 |
+
"num_beams": 1,
|
142 |
+
"num_channels": 3,
|
143 |
+
"num_hidden_layers": 24,
|
144 |
+
"num_return_sequences": 1,
|
145 |
+
"output_attentions": false,
|
146 |
+
"output_hidden_states": false,
|
147 |
+
"output_scores": false,
|
148 |
+
"pad_token_id": null,
|
149 |
+
"patch_size": 14,
|
150 |
+
"prefix": null,
|
151 |
+
"problem_type": null,
|
152 |
+
"projection_dim": 512,
|
153 |
+
"pruned_heads": {},
|
154 |
+
"remove_invalid_values": false,
|
155 |
+
"repetition_penalty": 1.0,
|
156 |
+
"return_dict": true,
|
157 |
+
"return_dict_in_generate": false,
|
158 |
+
"sep_token_id": null,
|
159 |
+
"suppress_tokens": null,
|
160 |
+
"task_specific_params": null,
|
161 |
+
"temperature": 1.0,
|
162 |
+
"tf_legacy_loss": false,
|
163 |
+
"tie_encoder_decoder": false,
|
164 |
+
"tie_word_embeddings": true,
|
165 |
+
"tokenizer_class": null,
|
166 |
+
"top_k": 50,
|
167 |
+
"top_p": 1.0,
|
168 |
+
"torch_dtype": null,
|
169 |
+
"torchscript": false,
|
170 |
+
"transformers_version": "4.26.0.dev0",
|
171 |
+
"typical_p": 1.0,
|
172 |
+
"use_bfloat16": false
|
173 |
+
},
|
174 |
+
"vision_config_dict": {
|
175 |
+
"hidden_size": 1024,
|
176 |
+
"intermediate_size": 4096,
|
177 |
+
"num_attention_heads": 16,
|
178 |
+
"num_hidden_layers": 24,
|
179 |
+
"patch_size": 14
|
180 |
+
}
|
181 |
+
}
|
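This is the stock CompVis CLIP-based safety checker (a CLIP ViT-L/14 vision tower plus concept embeddings) that screens generated images. For local experimentation it is commonly dropped at load time; a hedged sketch (at your own risk, and subject to the license terms below):

```python
from diffusers import DiffusionPipeline

# Passing safety_checker=None skips loading the checker entirely;
# requires_safety_checker=False suppresses the resulting warning.
pipe = DiffusionPipeline.from_pretrained(
    "Maseshi/Animistatics",
    safety_checker=None,
    requires_safety_checker=False,
)
```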
safety_checker/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
+size 1216064769
scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,14 @@
+{
+  "_class_name": "PNDMScheduler",
+  "_diffusers_version": "0.12.0.dev0",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "set_alpha_to_one": false,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "trained_betas": null
+}
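The default scheduler is PNDM with the same scaled-linear beta schedule as the YAML config (`beta_start: 0.00085`, `beta_end: 0.012`, 1000 training timesteps). Because all diffusers schedulers share this config, any compatible sampler can be rebuilt from it, as the README does with DPMSolver; a sketch with a different choice:

```python
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained("Maseshi/Animistatics")
print(pipe.scheduler.config)  # beta_start=0.00085, beta_end=0.012, scaled_linear, ...

# Build a different sampler from the same noise schedule.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)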
text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "openai/clip-vit-large-patch14",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.26.0.dev0",
+  "vocab_size": 49408
+}
text_encoder/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a375c8d147585ec7223d47e64b1cfedb63069f42c949eb85237413e5bb4f8b6
+size 492307041
tokenizer/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 77,
+  "name_or_path": "openai/clip-vit-large-patch14",
+  "pad_token": "<|endoftext|>",
+  "special_tokens_map_file": "./special_tokens_map.json",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
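The tokenizer is the stock CLIP ViT-L/14 tokenizer: prompts are padded/truncated to `model_max_length: 77` tokens, and the text encoder above turns them into hidden states of size 768, matching the UNet's `cross_attention_dim`. A sketch of the conditioning path in isolation:

```python
from transformers import CLIPTextModel, CLIPTokenizer

repo_id = "Maseshi/Animistatics"
tokenizer = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder")

tokens = tokenizer(
    "girl, cafe, plants, coffee",
    padding="max_length",
    max_length=tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt",
)
hidden_states = text_encoder(tokens.input_ids).last_hidden_state
print(hidden_states.shape)  # torch.Size([1, 77, 768])
```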
tokenizer/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
unet/config.json
ADDED
@@ -0,0 +1,44 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.12.0.dev0",
+  "act_fn": "silu",
+  "attention_head_dim": 8,
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "cross_attention_dim": 768,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dual_cross_attention": false,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 64,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
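The UNet is the standard SD v1 denoiser: 4 latent channels at `sample_size: 64` (a 512 px image, after the VAE's 8x downsampling), cross-attending to 768-dim text embeddings. A sketch of a single denoising call with dummy tensors, using only shapes from the config above:

```python
import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("Maseshi/Animistatics", subfolder="unet")

# Dummy latents and text embeddings, shaped per the config.
latents = torch.randn(1, unet.config.in_channels,
                      unet.config.sample_size, unet.config.sample_size)
text_embeddings = torch.randn(1, 77, unet.config.cross_attention_dim)
timestep = torch.tensor(999)

with torch.no_grad():
    noise_pred = unet(latents, timestep,
                      encoder_hidden_states=text_embeddings).sample
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```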
unet/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a2c73c5c7c2a31252f3a450689433a8f8acd24cc533d68a8d043f99d0a0312b
+size 3438366373
vae/config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.12.0.dev0",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 512,
+  "scaling_factor": 0.18215,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
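The VAE maps 512 px RGB images to 4-channel 64x64 latents and back, with the same `scaling_factor: 0.18215` as `scale_factor` in the YAML config. A sketch of the encode/decode round trip (the random tensor stands in for a real image normalized to [-1, 1]):

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("Maseshi/Animistatics", subfolder="vae")

image = torch.randn(1, 3, 512, 512)  # stand-in for a real image in [-1, 1]
with torch.no_grad():
    # Scale latents into the range the UNet was trained on, and undo it to decode.
    latents = vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
    decoded = vae.decode(latents / vae.config.scaling_factor).sample
print(latents.shape)  # torch.Size([1, 4, 64, 64]): 512 px -> 64x64 latents
```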
vae/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39c39d36eb762d178c878bcdd743f75ae609c6e13848f0a69c574d32d7ed3ab3
+size 334711857