Add first version of large -> small distilled model

- .gitattributes +1 -0
- 1_Pooling/config.json +10 -0
- README.md +253 -3
- config.json +26 -0
- config_sentence_transformers.json +9 -0
- model.safetensors +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +51 -0
- tokenizer.json +3 -0
- tokenizer_config.json +55 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 384,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
README.md CHANGED
@@ -1,3 +1,253 @@
---
tags:
- Sentence Transformers
- sentence-similarity
- sentence-transformers
language:
- multilingual
- af
- am
- ar
- as
- az
- be
- bg
- bn
- br
- bs
- ca
- cs
- cy
- da
- de
- el
- en
- eo
- es
- et
- eu
- fa
- fi
- fr
- fy
- ga
- gd
- gl
- gu
- ha
- he
- hi
- hr
- hu
- hy
- id
- is
- it
- ja
- jv
- ka
- kk
- km
- kn
- ko
- ku
- ky
- la
- lo
- lt
- lv
- mg
- mk
- ml
- mn
- mr
- ms
- my
- ne
- nl
- 'no'
- om
- or
- pa
- pl
- ps
- pt
- ro
- ru
- sa
- sd
- si
- sk
- sl
- so
- sq
- sr
- su
- sv
- sw
- ta
- te
- th
- tl
- tr
- ug
- uk
- ur
- uz
- vi
- xh
- yi
- zh
license: mit
---

## Multilingual-E5-small-distill-base

This model is an attempt to distill `intfloat/multilingual-e5-base` (the teacher) into `intfloat/multilingual-e5-small` (the student), while also applying [Matryoshka Representation Learning](https://arxiv.org/abs/2205.13147).

The distillation uses an L2 loss that teaches the student model to match the teacher model's cosine similarity scores on text pairs.
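
The training script is not part of this commit; below is a minimal sketch of such a similarity-distillation objective (function and variable names are illustrative assumptions):

```python
import torch
import torch.nn.functional as F


def similarity_distillation_loss(student_a: torch.Tensor, student_b: torch.Tensor,
                                 teacher_a: torch.Tensor, teacher_b: torch.Tensor) -> torch.Tensor:
    """L2 (MSE) loss between the student's and the frozen teacher's
    cosine similarities over a batch of text pairs."""
    student_sim = F.cosine_similarity(student_a, student_b, dim=-1)
    with torch.no_grad():  # the teacher is frozen during distillation
        teacher_sim = F.cosine_similarity(teacher_a, teacher_b, dim=-1)
    return F.mse_loss(student_sim, teacher_sim)
```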

The distillation dataset is composed of about 700k multilingual sentence pairs sampled from the following three datasets:
- [PhilipMay/stsb_multi_mt](https://huggingface.co/datasets/PhilipMay/stsb_multi_mt)
- [castorini/mr-tydi](https://huggingface.co/datasets/castorini/mr-tydi)
- [quora](https://huggingface.co/datasets/quora)

[Multilingual E5 Text Embeddings: A Technical Report](https://arxiv.org/pdf/2402.05672).
Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, Furu Wei, arXiv 2024

This model has 12 layers and an embedding size of 384.

## Usage

Below is an example to encode queries and passages from the MS-MARCO passage ranking dataset.

```python
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel


def average_pool(last_hidden_states: Tensor,
                 attention_mask: Tensor) -> Tensor:
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]


# Each input text should start with "query: " or "passage: ", even for non-English texts.
# For tasks other than retrieval, you can simply use the "query: " prefix.
input_texts = ['query: how much protein should a female eat',
               'query: 南瓜的家常做法',
               "passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
               "passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右,放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅"]

tokenizer = AutoTokenizer.from_pretrained('avditvs/multilingual-e5-small-distill-base')
model = AutoModel.from_pretrained('avditvs/multilingual-e5-small-distill-base')

# Tokenize the input texts
batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')

outputs = model(**batch_dict)
embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])

# normalize embeddings
embeddings = F.normalize(embeddings, p=2, dim=1)

# similarity scores between the two queries and the two passages
scores = (embeddings[:2] @ embeddings[2:].T) * 100
print(scores.tolist())
```

## Supported Languages

This model is initialized from [microsoft/Multilingual-MiniLM-L12-H384](https://huggingface.co/microsoft/Multilingual-MiniLM-L12-H384)
and continually trained on a mixture of multilingual datasets.
It supports the 100 languages of xlm-roberta,
but low-resource languages may see performance degradation.

## MTEB Benchmark Evaluation (Subset)

| Task                        | intfloat/multilingual-e5-base | intfloat/multilingual-e5-large | intfloat/multilingual-e5-small | avditvs/multilingual-e5-small-distill-base-small-0.1 |
| --------------------------- | ----------------------------- | ------------------------------ | ------------------------------ | ---------------------------------------------------- |
| STS15                       | 0.876                         | 0.882                          | 0.864                          | 0.865                                                 |
| BIOSSES                     | 0.870                         | 0.863                          | 0.857                          | 0.863                                                 |
| STS14                       | 0.789                         | 0.776                          | 0.788                          | 0.803                                                 |
| STS12                       | 0.858                         | 0.873                          | 0.854                          | 0.856                                                 |
| AskUbuntuDupQuestions       | 0.571                         | 0.577                          | 0.568                          | 0.574                                                 |
| StackOverflowDupQuestions   | 0.485                         | 0.486                          | 0.486                          | 0.485                                                 |
| AmazonReviewsClassification | 0.476                         | 0.470                          | 0.452                          | 0.450                                                 |
| ArguAna                     | 0.442                         | 0.544                          | 0.391                          | 0.480                                                 |
| ImdbClassification          | 0.849                         | 0.887                          | 0.758                          | 0.757                                                 |
| STS13                       | 0.756                         | 0.751                          | 0.764                          | 0.785                                                 |
| STSBenchmark                | 0.832                         | 0.836                          | 0.809                          | 0.818                                                 |
| STS17                       | 0.890                         | 0.896                          | 0.868                          | 0.871                                                 |
| SICK-R                      | 0.835                         | 0.838                          | 0.835                          | 0.850                                                 |
| STS22                       | 0.645                         | 0.675                          | 0.640                          | 0.648                                                 |
| STS16                       | 0.814                         | 0.824                          | 0.822                          | 0.820                                                 |
| Banking77Classification     | 0.741                         | 0.749                          | 0.706                          | 0.706                                                 |
| average                     | 0.733                         | 0.745                          | *0.717*                        | **0.727**                                             |
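
The evaluation script is not included in this commit; a minimal sketch with the [mteb](https://github.com/embeddings-benchmark/mteb) package might look like the following (the task subset is illustrative, and the exact settings behind the table above, including how the "query: "/"passage: " prefixes were applied, are not documented here):

```python
from mteb import MTEB
from sentence_transformers import SentenceTransformer

# Note: MTEB does not add the "query: "/"passage: " prefixes automatically.
model = SentenceTransformer("avditvs/multilingual-e5-small-distill-base")
evaluation = MTEB(tasks=["STS15", "BIOSSES", "SICK-R", "Banking77Classification"])
evaluation.run(model, output_folder="results")
```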

## Support for Sentence Transformers

Below is an example for usage with `sentence_transformers`.

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('avditvs/multilingual-e5-small-distill-base')

input_texts = [
    'query: how much protein should a female eat',
    'query: 南瓜的家常做法',
    "passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
    "passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右,放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅"
]

embeddings = model.encode(input_texts, normalize_embeddings=True)
```
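
Since [Matryoshka Representation Learning](https://arxiv.org/abs/2205.13147) was part of the training objective, the 384-dimensional embeddings can in principle be truncated to a shorter prefix and re-normalized. A minimal sketch, continuing from the example above (the truncation size of 256 is an illustrative assumption, not a documented setting):

```python
import numpy as np

# Keep only the first 256 dimensions of each embedding, then re-normalize.
dim = 256  # assumed truncation size; not documented in this card
truncated = embeddings[:, :dim]
truncated = truncated / np.linalg.norm(truncated, axis=1, keepdims=True)
```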

Package requirements:

`pip install sentence_transformers~=2.2.2`

Contributors: [michaelfeil](https://huggingface.co/michaelfeil)

## FAQ

**1. Do I need to add the prefixes "query: " and "passage: " to input texts?**

Yes, this is how the model is trained; otherwise you will see a performance degradation.

Here are some rules of thumb:
- Use "query: " and "passage: " correspondingly for asymmetric tasks such as passage retrieval in open QA and ad-hoc information retrieval.

- Use the "query: " prefix for symmetric tasks such as semantic similarity, bitext mining, and paraphrase retrieval.

- Use the "query: " prefix if you want to use embeddings as features, such as linear probing classification or clustering.

**2. Why are my reproduced results slightly different from those reported in the model card?**

Different versions of `transformers` and `pytorch` could cause negligible but non-zero performance differences.

**3. Why do the cosine similarity scores distribute around 0.7 to 1.0?**

This is a known and expected behavior, since a low temperature of 0.01 is used for the InfoNCE contrastive loss.

For text embedding tasks like text retrieval or semantic similarity, what matters is the relative order of the scores rather than their absolute values, so this should not be an issue.
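
For intuition, here is a minimal sketch of an InfoNCE loss with in-batch negatives (this illustrates the upstream E5 training objective, not code from this repository): dividing the cosine similarities by a small temperature sharpens the softmax, so training constrains the ranking of the scores rather than their absolute scale.

```python
import torch
import torch.nn.functional as F


def info_nce(query_emb: torch.Tensor, passage_emb: torch.Tensor,
             temperature: float = 0.01) -> torch.Tensor:
    """InfoNCE with in-batch negatives: the i-th passage is the positive
    for the i-th query. Embeddings are assumed L2-normalized, so the
    matrix product below yields cosine similarities."""
    logits = query_emb @ passage_emb.T / temperature
    labels = torch.arange(query_emb.size(0), device=query_emb.device)
    return F.cross_entropy(logits, labels)
```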

## Citation

If you find our paper or models helpful, please consider citing as follows:

```
@article{wang2024multilingual,
  title={Multilingual E5 Text Embeddings: A Technical Report},
  author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Yang, Linjun and Majumder, Rangan and Wei, Furu},
  journal={arXiv preprint arXiv:2402.05672},
  year={2024}
}
```

## Limitations

Long texts will be truncated to at most 512 tokens.
config.json ADDED
@@ -0,0 +1,26 @@
{
  "_name_or_path": "avditvs/multilingual-e5-small-distill-base-0.1",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "initializer_range": 0.02,
  "intermediate_size": 1536,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "tokenizer_class": "XLMRobertaTokenizer",
  "torch_dtype": "float32",
  "transformers_version": "4.40.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 250037
}
config_sentence_transformers.json ADDED
@@ -0,0 +1,9 @@
{
  "__version__": {
    "sentence_transformers": "2.7.0",
    "transformers": "4.40.1",
    "pytorch": "2.3.0+cu121"
  },
  "prompts": {},
  "default_prompt_name": null
}
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69ce7a5cf642d8fe96efcbf4029b3948e509df031f1f05b7e9f26b7971db0d97
size 470637416
modules.json ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c2ccb33b55c4d157419a2105d6e033f7514c2baa6306471c56b86ab02787613
size 17083053
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "250001": {
      "content": "<mask>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "sp_model_kwargs": {},
  "tokenizer_class": "XLMRobertaTokenizer",
  "unk_token": "<unk>"
}