Add SetFit model
Browse files- .gitattributes +1 -0
- 1_Pooling/config.json +10 -0
- README.md +197 -0
- config.json +29 -0
- config_sentence_transformers.json +10 -0
- config_setfit.json +23 -0
- model.safetensors +3 -0
- model_head.pkl +3 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +51 -0
- tokenizer.json +3 -0
- tokenizer_config.json +61 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"word_embedding_dimension": 768,
|
3 |
+
"pooling_mode_cls_token": false,
|
4 |
+
"pooling_mode_mean_tokens": true,
|
5 |
+
"pooling_mode_max_tokens": false,
|
6 |
+
"pooling_mode_mean_sqrt_len_tokens": false,
|
7 |
+
"pooling_mode_weightedmean_tokens": false,
|
8 |
+
"pooling_mode_lasttoken": false,
|
9 |
+
"include_prompt": true
|
10 |
+
}
|
README.md
ADDED
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
base_model: sentence-transformers/paraphrase-multilingual-mpnet-base-v2
|
3 |
+
library_name: setfit
|
4 |
+
metrics:
|
5 |
+
- accuracy
|
6 |
+
pipeline_tag: text-classification
|
7 |
+
tags:
|
8 |
+
- setfit
|
9 |
+
- sentence-transformers
|
10 |
+
- text-classification
|
11 |
+
- generated_from_setfit_trainer
|
12 |
+
widget:
|
13 |
+
- text: Dominica is striving for multi-sectoral and multi-level adaptation across
|
14 |
+
all segments of society, giving particular consideration to vulnerable groups
|
15 |
+
- the poor, disabled, elderly and Kalinago community; as well as gender disparities.
|
16 |
+
Recognising the threats posed by climate change, Dominica has over the last two
|
17 |
+
decades, undertaken a number of initiatives to respond to this threat. The adaptation
|
18 |
+
component has been revised to incorporate updated information on regional climate
|
19 |
+
change projections and impacts on Caribbean SIDS.
|
20 |
+
- text: They live in geographical regions and ecosystems that are the most vulnerable
|
21 |
+
to climate change. These include polar regions, humid tropical forests, high mountains,
|
22 |
+
small islands, coastal regions, and arid and semi-arid lands, among others. The
|
23 |
+
impacts of climate change in such regions have strong implications for the ecosystem-based
|
24 |
+
livelihoods on which many indigenous peoples depend. Moreover, in some regions
|
25 |
+
such as the Pacific, the very existence of many indigenous territories is under
|
26 |
+
threat from rising sea levels that not only pose a grave threat to indigenous
|
27 |
+
peoples’ livelihoods but also to their cultures and ways of life.
|
28 |
+
- text: 'Enhancing climate change resilience in the Benguela current fisheries system
|
29 |
+
(regional project: Angola, Namibia and South Africa). The project aims to build
|
30 |
+
resilience and reduce vulnerability of the Benguela Current marine fisheries systems
|
31 |
+
to climate change through strengthened adaptive capacity and implementation of
|
32 |
+
participatory and integrated adaptive strategies in order to ensure food and livelihood
|
33 |
+
security. Fisheries. Agriculture and food security. Total project cost (US $ million):
|
34 |
+
16.520. Implementing GEF agency: FAO.'
|
35 |
+
- text: As the average annual precipitation across the country is expected to decline
|
36 |
+
2.6-3.4% by 2025 and 5.9-6.3% by 2050 this will result direct yield response.
|
37 |
+
As described by PACE experiment59 on the Pastures and Climate Extremes using a
|
38 |
+
factorial combination of elevated temperature (ambient +3°C) and winter/spring
|
39 |
+
extreme drought (60% rainfall reduction) resulted in productivity declines of
|
40 |
+
up to 73%. Functional group identity was not an important predictor of yield response
|
41 |
+
to drought.
|
42 |
+
- text: Poor rural households in marginal territories that have a low productive potential
|
43 |
+
and/or that are far from markets and infrastructure are highly vulnerable to climate-change
|
44 |
+
impacts and could easily fall into poverty-environment traps 9. This means that
|
45 |
+
communities that are already struggling economically and geographically isolated
|
46 |
+
are at greater risk of experiencing the negative impacts of climate change on
|
47 |
+
their agricultural livelihoods.
|
48 |
+
inference: false
|
49 |
+
---
|
50 |
+
|
51 |
+
# SetFit with sentence-transformers/paraphrase-multilingual-mpnet-base-v2
|
52 |
+
|
53 |
+
This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2) as the Sentence Transformer embedding model. A [SetFitHead](https://huggingface.co/docs/setfit/reference/main#setfit.SetFitHead) instance is used for classification.
|
54 |
+
|
55 |
+
The model has been trained using an efficient few-shot learning technique that involves:
|
56 |
+
|
57 |
+
1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
|
58 |
+
2. Training a classification head with features from the fine-tuned Sentence Transformer.
|
59 |
+
|
60 |
+
## Model Details
|
61 |
+
|
62 |
+
### Model Description
|
63 |
+
- **Model Type:** SetFit
|
64 |
+
- **Sentence Transformer body:** [sentence-transformers/paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2)
|
65 |
+
- **Classification head:** a [SetFitHead](https://huggingface.co/docs/setfit/reference/main#setfit.SetFitHead) instance
|
66 |
+
- **Maximum Sequence Length:** 128 tokens
|
67 |
+
- **Number of Classes:** 18 classes
|
68 |
+
<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
|
69 |
+
<!-- - **Language:** Unknown -->
|
70 |
+
<!-- - **License:** Unknown -->
|
71 |
+
|
72 |
+
### Model Sources
|
73 |
+
|
74 |
+
- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
|
75 |
+
- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
|
76 |
+
- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)
|
77 |
+
|
78 |
+
## Uses
|
79 |
+
|
80 |
+
### Direct Use for Inference
|
81 |
+
|
82 |
+
First install the SetFit library:
|
83 |
+
|
84 |
+
```bash
|
85 |
+
pip install setfit
|
86 |
+
```
|
87 |
+
|
88 |
+
Then you can load this model and run inference.
|
89 |
+
|
90 |
+
```python
|
91 |
+
from setfit import SetFitModel
|
92 |
+
|
93 |
+
# Download from the 🤗 Hub
|
94 |
+
model = SetFitModel.from_pretrained("GIZ/vulnerability_multilabel_v2")
|
95 |
+
# Run inference
|
96 |
+
preds = model("Poor rural households in marginal territories that have a low productive potential and/or that are far from markets and infrastructure are highly vulnerable to climate-change impacts and could easily fall into poverty-environment traps 9. This means that communities that are already struggling economically and geographically isolated are at greater risk of experiencing the negative impacts of climate change on their agricultural livelihoods.")
|
97 |
+
```
|
98 |
+
|
99 |
+
<!--
|
100 |
+
### Downstream Use
|
101 |
+
|
102 |
+
*List how someone could finetune this model on their own dataset.*
|
103 |
+
-->
|
104 |
+
|
105 |
+
<!--
|
106 |
+
### Out-of-Scope Use
|
107 |
+
|
108 |
+
*List how the model may foreseeably be misused and address what users ought not to do with the model.*
|
109 |
+
-->
|
110 |
+
|
111 |
+
<!--
|
112 |
+
## Bias, Risks and Limitations
|
113 |
+
|
114 |
+
*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
|
115 |
+
-->
|
116 |
+
|
117 |
+
<!--
|
118 |
+
### Recommendations
|
119 |
+
|
120 |
+
*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
|
121 |
+
-->
|
122 |
+
|
123 |
+
## Training Details
|
124 |
+
|
125 |
+
### Training Set Metrics
|
126 |
+
| Training set | Min | Median | Max |
|
127 |
+
|:-------------|:----|:--------|:----|
|
128 |
+
| Word count | 1 | 61.2897 | 164 |
|
129 |
+
|
130 |
+
### Training Hyperparameters
|
131 |
+
- batch_size: (16, 2)
|
132 |
+
- num_epochs: (1, 0)
|
133 |
+
- max_steps: -1
|
134 |
+
- sampling_strategy: undersampling
|
135 |
+
- body_learning_rate: (2e-05, 1e-05)
|
136 |
+
- head_learning_rate: 0.01
|
137 |
+
- loss: CosineSimilarityLoss
|
138 |
+
- distance_metric: cosine_distance
|
139 |
+
- margin: 0.25
|
140 |
+
- end_to_end: False
|
141 |
+
- use_amp: False
|
142 |
+
- warmup_proportion: 0.01
|
143 |
+
- seed: 42
|
144 |
+
- eval_max_steps: -1
|
145 |
+
- load_best_model_at_end: False
|
146 |
+
|
147 |
+
### Training Results
|
148 |
+
| Epoch | Step | Training Loss | Validation Loss |
|
149 |
+
|:------:|:----:|:-------------:|:---------------:|
|
150 |
+
| 0.0002 | 1 | 0.2095 | - |
|
151 |
+
| 0.2084 | 1000 | 0.0307 | 0.1211 |
|
152 |
+
| 0.4168 | 2000 | 0.0165 | 0.1275 |
|
153 |
+
| 0.6251 | 3000 | 0.0085 | 0.131 |
|
154 |
+
| 0.8335 | 4000 | 0.0317 | 0.1171 |
|
155 |
+
|
156 |
+
### Framework Versions
|
157 |
+
- Python: 3.9.5
|
158 |
+
- SetFit: 1.0.3
|
159 |
+
- Sentence Transformers: 3.0.1
|
160 |
+
- Transformers: 4.44.2
|
161 |
+
- PyTorch: 2.4.0+cu121
|
162 |
+
- Datasets: 2.3.0
|
163 |
+
- Tokenizers: 0.19.1
|
164 |
+
|
165 |
+
## Citation
|
166 |
+
|
167 |
+
### BibTeX
|
168 |
+
```bibtex
|
169 |
+
@article{https://doi.org/10.48550/arxiv.2209.11055,
|
170 |
+
doi = {10.48550/ARXIV.2209.11055},
|
171 |
+
url = {https://arxiv.org/abs/2209.11055},
|
172 |
+
author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
|
173 |
+
keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
|
174 |
+
title = {Efficient Few-Shot Learning Without Prompts},
|
175 |
+
publisher = {arXiv},
|
176 |
+
year = {2022},
|
177 |
+
copyright = {Creative Commons Attribution 4.0 International}
|
178 |
+
}
|
179 |
+
```
|
180 |
+
|
181 |
+
<!--
|
182 |
+
## Glossary
|
183 |
+
|
184 |
+
*Clearly define terms in order to be accessible across audiences.*
|
185 |
+
-->
|
186 |
+
|
187 |
+
<!--
|
188 |
+
## Model Card Authors
|
189 |
+
|
190 |
+
*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
|
191 |
+
-->
|
192 |
+
|
193 |
+
<!--
|
194 |
+
## Model Card Contact
|
195 |
+
|
196 |
+
*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
|
197 |
+
-->
|
config.json
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "sentence-transformers/paraphrase-multilingual-mpnet-base-v2",
|
3 |
+
"architectures": [
|
4 |
+
"XLMRobertaModel"
|
5 |
+
],
|
6 |
+
"attention_probs_dropout_prob": 0.1,
|
7 |
+
"bos_token_id": 0,
|
8 |
+
"classifier_dropout": null,
|
9 |
+
"eos_token_id": 2,
|
10 |
+
"gradient_checkpointing": false,
|
11 |
+
"hidden_act": "gelu",
|
12 |
+
"hidden_dropout_prob": 0.1,
|
13 |
+
"hidden_size": 768,
|
14 |
+
"initializer_range": 0.02,
|
15 |
+
"intermediate_size": 3072,
|
16 |
+
"layer_norm_eps": 1e-05,
|
17 |
+
"max_position_embeddings": 514,
|
18 |
+
"model_type": "xlm-roberta",
|
19 |
+
"num_attention_heads": 12,
|
20 |
+
"num_hidden_layers": 12,
|
21 |
+
"output_past": true,
|
22 |
+
"pad_token_id": 1,
|
23 |
+
"position_embedding_type": "absolute",
|
24 |
+
"torch_dtype": "float32",
|
25 |
+
"transformers_version": "4.44.2",
|
26 |
+
"type_vocab_size": 1,
|
27 |
+
"use_cache": true,
|
28 |
+
"vocab_size": 250002
|
29 |
+
}
|
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"__version__": {
|
3 |
+
"sentence_transformers": "3.0.1",
|
4 |
+
"transformers": "4.44.2",
|
5 |
+
"pytorch": "2.4.0+cu121"
|
6 |
+
},
|
7 |
+
"prompts": {},
|
8 |
+
"default_prompt_name": null,
|
9 |
+
"similarity_fn_name": null
|
10 |
+
}
|
config_setfit.json
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"labels": [
|
3 |
+
"Agricultural communities",
|
4 |
+
"Children",
|
5 |
+
"Coastal communities",
|
6 |
+
"Ethnic, racial or other minorities",
|
7 |
+
"Fishery communities",
|
8 |
+
"Informal sector workers",
|
9 |
+
"Members of indigenous and local communities",
|
10 |
+
"Migrants and displaced persons",
|
11 |
+
"Older persons",
|
12 |
+
"Other",
|
13 |
+
"Persons living in poverty",
|
14 |
+
"Persons with disabilities",
|
15 |
+
"Persons with pre-existing health conditions",
|
16 |
+
"Residents of drought-prone regions",
|
17 |
+
"Rural populations",
|
18 |
+
"Sexual minorities (LGBTQI+)",
|
19 |
+
"Urban populations",
|
20 |
+
"Women and other genders"
|
21 |
+
],
|
22 |
+
"normalize_embeddings": true
|
23 |
+
}
|
model.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4b25193438cfffb7398b1a14abb40376c43ff6b732861ef3b926ae6ec9321c69
|
3 |
+
size 1112197096
|
model_head.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:0130b0360bfba0ea1c2f20d20d556ddeada389d91187d362f89862e1e3e9bfa4
|
3 |
+
size 56918
|
modules.json
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"idx": 0,
|
4 |
+
"name": "0",
|
5 |
+
"path": "",
|
6 |
+
"type": "sentence_transformers.models.Transformer"
|
7 |
+
},
|
8 |
+
{
|
9 |
+
"idx": 1,
|
10 |
+
"name": "1",
|
11 |
+
"path": "1_Pooling",
|
12 |
+
"type": "sentence_transformers.models.Pooling"
|
13 |
+
}
|
14 |
+
]
|
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"max_seq_length": 128,
|
3 |
+
"do_lower_case": false
|
4 |
+
}
|
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"bos_token": {
|
3 |
+
"content": "<s>",
|
4 |
+
"lstrip": false,
|
5 |
+
"normalized": false,
|
6 |
+
"rstrip": false,
|
7 |
+
"single_word": false
|
8 |
+
},
|
9 |
+
"cls_token": {
|
10 |
+
"content": "<s>",
|
11 |
+
"lstrip": false,
|
12 |
+
"normalized": false,
|
13 |
+
"rstrip": false,
|
14 |
+
"single_word": false
|
15 |
+
},
|
16 |
+
"eos_token": {
|
17 |
+
"content": "</s>",
|
18 |
+
"lstrip": false,
|
19 |
+
"normalized": false,
|
20 |
+
"rstrip": false,
|
21 |
+
"single_word": false
|
22 |
+
},
|
23 |
+
"mask_token": {
|
24 |
+
"content": "<mask>",
|
25 |
+
"lstrip": true,
|
26 |
+
"normalized": false,
|
27 |
+
"rstrip": false,
|
28 |
+
"single_word": false
|
29 |
+
},
|
30 |
+
"pad_token": {
|
31 |
+
"content": "<pad>",
|
32 |
+
"lstrip": false,
|
33 |
+
"normalized": false,
|
34 |
+
"rstrip": false,
|
35 |
+
"single_word": false
|
36 |
+
},
|
37 |
+
"sep_token": {
|
38 |
+
"content": "</s>",
|
39 |
+
"lstrip": false,
|
40 |
+
"normalized": false,
|
41 |
+
"rstrip": false,
|
42 |
+
"single_word": false
|
43 |
+
},
|
44 |
+
"unk_token": {
|
45 |
+
"content": "<unk>",
|
46 |
+
"lstrip": false,
|
47 |
+
"normalized": false,
|
48 |
+
"rstrip": false,
|
49 |
+
"single_word": false
|
50 |
+
}
|
51 |
+
}
|
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
|
3 |
+
size 17082987
|
tokenizer_config.json
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"added_tokens_decoder": {
|
3 |
+
"0": {
|
4 |
+
"content": "<s>",
|
5 |
+
"lstrip": false,
|
6 |
+
"normalized": false,
|
7 |
+
"rstrip": false,
|
8 |
+
"single_word": false,
|
9 |
+
"special": true
|
10 |
+
},
|
11 |
+
"1": {
|
12 |
+
"content": "<pad>",
|
13 |
+
"lstrip": false,
|
14 |
+
"normalized": false,
|
15 |
+
"rstrip": false,
|
16 |
+
"single_word": false,
|
17 |
+
"special": true
|
18 |
+
},
|
19 |
+
"2": {
|
20 |
+
"content": "</s>",
|
21 |
+
"lstrip": false,
|
22 |
+
"normalized": false,
|
23 |
+
"rstrip": false,
|
24 |
+
"single_word": false,
|
25 |
+
"special": true
|
26 |
+
},
|
27 |
+
"3": {
|
28 |
+
"content": "<unk>",
|
29 |
+
"lstrip": false,
|
30 |
+
"normalized": false,
|
31 |
+
"rstrip": false,
|
32 |
+
"single_word": false,
|
33 |
+
"special": true
|
34 |
+
},
|
35 |
+
"250001": {
|
36 |
+
"content": "<mask>",
|
37 |
+
"lstrip": true,
|
38 |
+
"normalized": false,
|
39 |
+
"rstrip": false,
|
40 |
+
"single_word": false,
|
41 |
+
"special": true
|
42 |
+
}
|
43 |
+
},
|
44 |
+
"bos_token": "<s>",
|
45 |
+
"clean_up_tokenization_spaces": true,
|
46 |
+
"cls_token": "<s>",
|
47 |
+
"eos_token": "</s>",
|
48 |
+
"mask_token": "<mask>",
|
49 |
+
"max_length": 128,
|
50 |
+
"model_max_length": 128,
|
51 |
+
"pad_to_multiple_of": null,
|
52 |
+
"pad_token": "<pad>",
|
53 |
+
"pad_token_type_id": 0,
|
54 |
+
"padding_side": "right",
|
55 |
+
"sep_token": "</s>",
|
56 |
+
"stride": 0,
|
57 |
+
"tokenizer_class": "XLMRobertaTokenizer",
|
58 |
+
"truncation_side": "right",
|
59 |
+
"truncation_strategy": "longest_first",
|
60 |
+
"unk_token": "<unk>"
|
61 |
+
}
|