Upload tokenizer

- added_tokens.json +2 -2
- tokenizer_config.json +1 -17
- vocab.json +2 -2
added_tokens.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "</s>": 128,
   "<pad>": 130,
-  "<
-  "
+  "<unk>": 129,
+  "[PAD]": 127
 }
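A quick sanity check of the new map, using only the standard library; this is a minimal sketch that assumes a local checkout of the repo files:

```python
import json

# Read the updated added_tokens.json; the expected values are
# the "+" lines in the diff above.
with open("added_tokens.json") as f:
    added = json.load(f)

assert added["<unk>"] == 129
assert added["[PAD]"] == 127
print(added)  # {"</s>": 128, "<pad>": 130, "<unk>": 129, "[PAD]": 127}
```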
tokenizer_config.json
CHANGED
@@ -1,14 +1,6 @@
 {
   "added_tokens_decoder": {
-    "
-      "content": "[UNK]",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": false
-    },
-    "126": {
+    "127": {
       "content": "[PAD]",
       "lstrip": true,
       "normalized": false,
@@ -16,14 +8,6 @@
       "single_word": false,
       "special": false
     },
-    "127": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
     "128": {
       "content": "</s>",
       "lstrip": false,
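A minimal sketch (plain `json`, no `transformers` needed) to confirm the reshuffled `added_tokens_decoder`: after this commit the `[PAD]` entry sits under ID "127", where the removed `<s>` entry used to be.

```python
import json

with open("tokenizer_config.json") as f:
    config = json.load(f)

# The "[UNK]" and "<s>" entries were deleted; "[PAD]" moved to ID 127.
pad = config["added_tokens_decoder"]["127"]
assert pad["content"] == "[PAD]"
assert pad["lstrip"] is True and pad["special"] is False
```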
vocab.json
CHANGED
@@ -23,8 +23,8 @@
   "B": 22,
   "L": 23,
   "T": 24,
-  "[PAD]":
-  "[UNK]":
+  "[PAD]": 127,
+  "[UNK]": 127,
   "a": 25,
   "b": 26,
   "c": 27,
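As the `+` lines show, `[PAD]` and `[UNK]` now share ID 127, so a naive inverse (ID-to-token) lookup can keep only one of them. A minimal sketch, assuming a local copy of vocab.json:

```python
import json

with open("vocab.json") as f:
    vocab = json.load(f)

# Both special tokens map to the same ID after this change.
assert vocab["[PAD]"] == 127 and vocab["[UNK]"] == 127

# Inverting the mapping silently drops one of the two colliding tokens.
id_to_token = {i: t for t, i in vocab.items()}
print(id_to_token[127])  # "[UNK]": it appears after "[PAD]" and wins
```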