upd preprocessor

Files changed:
- merges.txt +1 -1
- preprocessor_config.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -1
- vocab.json +0 -0
merges.txt
CHANGED

@@ -1,4 +1,4 @@
-#version: 0.2
+#version: 0.2 - Trained by `huggingface/tokenizers`
 i n
 t h
 a n
preprocessor_config.json
CHANGED

@@ -14,6 +14,7 @@
     0.26130258,
     0.27577711
   ],
+  "processor_class": "CLIPProcessor",
   "resample": 3,
   "size": 224
 }
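
The added "processor_class" key is what lets the Auto classes in transformers resolve this repo to the right processor: AutoProcessor reads it from preprocessor_config.json and dispatches accordingly. A minimal sketch, assuming this commit lives on the openai/clip-vit-base-patch32 repo (as the name_or_path in tokenizer_config.json below suggests):

from transformers import AutoProcessor

# AutoProcessor finds "processor_class": "CLIPProcessor" in
# preprocessor_config.json and loads a CLIPProcessor.
processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a photo of a cat"], return_tensors="pt")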
tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
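For context: tokenizer.json is the single-file serialization format of the Rust-backed `huggingface/tokenizers` library; adding it enables the fast tokenizer for this repo. A minimal sketch, under the same repo-id assumption as above:

from transformers import AutoTokenizer

# With tokenizer.json present, the fast (Rust) tokenizer is loaded
# from that one file instead of vocab.json + merges.txt.
tok = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32", use_fast=True)
print(type(tok).__name__)  # expected: CLIPTokenizerFast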
tokenizer_config.json
CHANGED

@@ -1 +1 @@
-{"
+{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": "<|endoftext|>", "add_prefix_space": false, "errors": "replace", "do_lower_case": true, "name_or_path": "openai/clip-vit-base-patch32", "model_max_length": 77, "special_tokens_map_file": "/root/.cache/huggingface/transformers/18a566598f286c9139f88160c99f84eec492a26bd22738fa9cb44d5b7e0a5c76.cce1206abbad28826f000510f22f354e53e66a97f7c23745a7dfe27609cc07f5", "tokenizer_class": "CLIPTokenizer", "processor_class": "CLIPProcessor"}

(The removed line is shown truncated by the diff viewer.)
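The rewritten config pins the special tokens and the 77-token context length that CLIP's text encoder expects. A minimal sketch checking a few of those fields after loading, again assuming the openai/clip-vit-base-patch32 repo id:

from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
assert tok.model_max_length == 77          # CLIP's text context length
assert tok.bos_token == "<|startoftext|>"  # from tokenizer_config.json
assert tok.eos_token == "<|endoftext|>"    # <|endoftext|> also serves as unk and pad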
vocab.json
CHANGED

The diff for this file is too large to render. See raw diff.