Upload tokenizer
- tokenizer.json +0 -0
- tokenizer_config.json +3 -3
tokenizer.json
CHANGED
The diff for this file is too large to render; see the raw diff.
tokenizer_config.json
CHANGED
@@ -2121,7 +2121,7 @@
       "special": true
     },
     "265": {
-      "content": "[DOMAIN=
+      "content": "[DOMAIN=['CANONICAL ➝ Lexicography ➝ Thematic Word Lists ➝ Ura']]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -2129,7 +2129,7 @@
       "special": true
     },
     "266": {
-      "content": "[DOMAIN=
+      "content": "[DOMAIN=lettre privée]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -10720,7 +10720,7 @@
       "single_word": false,
       "special": true
     },
-    "
+    "7117": {
       "content": "[EOS]",
       "lstrip": false,
       "normalized": false,
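For reference, a minimal sketch of how the entries touched by this commit could be checked after the upload, assuming the tokenizer loads with the transformers library. The repo id below is a placeholder, and the token ids (265, 266, 7117) are taken from the hunks shown above.

from transformers import AutoTokenizer

# Placeholder repo id; substitute the repository this commit belongs to.
tok = AutoTokenizer.from_pretrained("user/model")

# added_tokens_decoder mirrors the "added_tokens_decoder" section of
# tokenizer_config.json: a mapping from token id to AddedToken, so the
# three ids edited in this commit should resolve to the new contents.
for token_id in (265, 266, 7117):
    added = tok.added_tokens_decoder.get(token_id)
    if added is not None:
        print(token_id, repr(added.content), "special:", added.special)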