tlwu committed
Commit 0987186 • 1 Parent(s): c31995d

models from Olive
model_index.json ADDED
@@ -0,0 +1,46 @@
+{
+  "_class_name": "ORTStableDiffusionXLPipeline",
+  "_diffusers_version": "0.24.0",
+  "_name_or_path": "stabilityai/sdxl-turbo",
+  "feature_extractor": [
+    null,
+    null
+  ],
+  "force_zeros_for_empty_prompt": true,
+  "image_encoder": [
+    null,
+    null
+  ],
+  "scheduler": [
+    "diffusers",
+    "EulerAncestralDiscreteScheduler"
+  ],
+  "text_encoder": [
+    "diffusers",
+    "OnnxRuntimeModel"
+  ],
+  "text_encoder_2": [
+    "diffusers",
+    "OnnxRuntimeModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "tokenizer_2": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "OnnxRuntimeModel"
+  ],
+  "vae_decoder": [
+    "diffusers",
+    "OnnxRuntimeModel"
+  ],
+  "vae_encoder": [
+    "diffusers",
+    "OnnxRuntimeModel"
+  ]
+}
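The model_index.json above assembles the Olive-optimized ONNX components into an ORTStableDiffusionXLPipeline from optimum. A minimal sketch of loading and running it, assuming a local clone of this repo as the working directory and optimum[onnxruntime-gpu] installed:

from optimum.onnxruntime import ORTStableDiffusionXLPipeline

# Load every component listed in model_index.json from the current directory.
pipe = ORTStableDiffusionXLPipeline.from_pretrained(
    ".", provider="CUDAExecutionProvider"
)

# SDXL-Turbo is distilled for few-step sampling without classifier-free
# guidance, hence a single step and guidance_scale=0.0.
image = pipe(
    "a photo of an astronaut riding a horse",
    num_inference_steps=1,
    guidance_scale=0.0,
).images[0]
image.save("astronaut.png")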
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,17 @@
+{
+  "_class_name": "EulerAncestralDiscreteScheduler",
+  "_diffusers_version": "0.24.0",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "interpolation_type": "linear",
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "sample_max_value": 1.0,
+  "set_alpha_to_one": false,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "timestep_spacing": "trailing",
+  "trained_betas": null
+}
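This matches the default SDXL-Turbo sampler configuration; "timestep_spacing": "trailing" is what makes single-step inference land on the final trained timestep. A sketch of loading the scheduler on its own (local clone assumed):

from diffusers import EulerAncestralDiscreteScheduler

scheduler = EulerAncestralDiscreteScheduler.from_pretrained(".", subfolder="scheduler")
scheduler.set_timesteps(num_inference_steps=1)
print(scheduler.timesteps)  # with trailing spacing this should be tensor([999.])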
{ORT_CUDA/sdxl-turbo/engine/clip.ort_cuda.fp16 → text_encoder}/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c444caf5de76cdfc1d631b8df96c8206d90300e44c25c67b9f730dc411917ea2
-size 246165433
+oid sha256:92afb291fb3760769372d0e6660c243b018dff319bd7820dfb4ba033ec53c8db
+size 246178359
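The weight entries in this commit are Git LFS pointers (spec version, SHA-256 object id, byte size) rather than the tensors themselves; the actual payloads, up to ~5 GB for the unet, live in LFS storage. To pull only selected components without cloning everything, one option is huggingface_hub; the repo id below is a hypothetical placeholder:

from huggingface_hub import snapshot_download

# Fetch configs, tokenizers, and the first text encoder only; the larger
# LFS payloads (unet, vae, text_encoder_2) are skipped.
local_dir = snapshot_download(
    repo_id="tlwu/sdxl-turbo-onnxruntime",  # hypothetical repo id
    allow_patterns=["*.json", "tokenizer/*", "tokenizer_2/*", "text_encoder/*"],
)
print(local_dir)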
text_encoder_2/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ef9d6ed1f90407421056518f4308da5ecbd4de069c173700575cd4140da86ea
+size 1389427378
{ORT_CUDA/sdxl-turbo/engine/clip2.ort_cuda.fp16 → text_encoder_2}/model.onnx.data RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df8a30895d8eecdc82b8fc36bfd5f18879b6bfaba6eb8ade8b05d04aa6dfdad5
-size 1389319680
+oid sha256:0c29d6ace4f348ccbcd302ab0d858994e64240a5b54d7c5ef431a88f2f287e2c
+size 2778639360
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
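Both tokenizers are plain CLIP BPE tokenizers capped at 77 tokens. A sketch of producing the padded input ids the text_encoder graph consumes (local clone assumed):

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(".", subfolder="tokenizer")
inputs = tokenizer(
    "a photo of an astronaut riding a horse",
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77, per the config above
    truncation=True,
    return_tensors="np",
)
print(inputs["input_ids"].shape)  # (1, 77)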
 
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "!",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "!",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "!",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
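Note the one asymmetry between the two tokenizer configs: tokenizer_2 pads with "!" (token id 0), matching the zero-padding convention of SDXL's second, OpenCLIP-derived text encoder, while tokenizer pads with <|endoftext|>. A quick check (local clone assumed):

from transformers import CLIPTokenizer

tok_1 = CLIPTokenizer.from_pretrained(".", subfolder="tokenizer")
tok_2 = CLIPTokenizer.from_pretrained(".", subfolder="tokenizer_2")
print(tok_1.pad_token, tok_1.pad_token_id)  # <|endoftext|> 49407
print(tok_2.pad_token, tok_2.pad_token_id)  # ! 0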
 
{ORT_CUDA/sdxl-turbo/engine/clip2.ort_cuda.fp16 → unet}/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e22b14a375aae73df2df5d8b008e1a5d5584df5ac6c37e21f60d06f2c4706407
-size 134410
+oid sha256:d06947552e0581049be42679f6da402d63888a72134dc1d274014a5a3277ea8b
+size 736952
{ORT_CUDA/sdxl-turbo/engine/unetxl.ort_cuda.fp16 → unet}/model.onnx.data RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e84089ea857d55d40d32306f08aa53aa495dacf877d49eff47193741e3d416c0
+oid sha256:ccf502a0c2e454086a4fb08f71e62bbb752a06e1063b99d0352ba42695b1f9ae
 size 5135092480
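After this commit, unet/model.onnx holds only the graph (~720 KB); the ~5.1 GB of weights remain in the external-data file model.onnx.data, which onnxruntime resolves by relative path, so the two files must stay in the same directory. A sketch of opening the component directly (paths relative to a local clone):

import onnxruntime as ort

# The session reads model.onnx.data automatically from the model's directory.
session = ort.InferenceSession(
    "unet/model.onnx",
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)
for inp in session.get_inputs():
    print(inp.name, inp.shape, inp.type)  # typical SDXL unet inputs: sample, timestep, encoder_hidden_states, ...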
{ORT_CUDA/sdxl-turbo/engine/vae.ort_cuda.fp16 → vae_decoder}/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f475291593170ccb03a4916bf6a92048750471d0c67d55e5424ff93f01ab1f4
-size 99070385
+oid sha256:7987d20deef6934d7d30bd7486da698940765d5383a5ca009f0aad74c737ec70
+size 99072671
{ORT_CUDA/sdxl-turbo/engine/unetxl.ort_cuda.fp16 → vae_encoder}/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:127ce7791f6b46bd2ea113e6dc68acb6d1829e055bd7f7c11d37019c1fbdc5c7
-size 704979
+oid sha256:a56f9f96a763bc9995d032d6e03159cf433569047488e7594f0b15066cbed44f
+size 68412330