Add new tokens to model
- added_tokens.json +1 -0
- config.json +3 -1
- flax_model.msgpack +2 -2
- pytorch_model.bin +2 -2
- special_tokens_map.json +1 -1
- tokenizer.json +0 -0
- tokenizer_config.json +1 -1
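
Taken together, these files are what transformers writes after adding tokens to a tokenizer and resizing the model's embeddings. A minimal sketch of how a commit like this is typically produced (the output directory name is hypothetical, and the exact whitespace strings are assumptions; the rendered diff collapses runs of spaces, so only "\t\t" is unambiguous):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Start from the base checkpoint named in config.json.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

# Register the new whitespace tokens. The space runs below are
# placeholders: the diff rendering collapses consecutive spaces,
# so their exact lengths are not recoverable here.
new_tokens = ["\t\t", "  ", "    ", "        "]
tokenizer.add_special_tokens({"additional_special_tokens": new_tokens})

# Grow the embedding matrix to match the enlarged vocabulary
# (50257 -> 50261, as reflected in config.json below).
model.resize_token_embeddings(len(tokenizer))

# save_pretrained writes added_tokens.json, special_tokens_map.json,
# tokenizer.json, tokenizer_config.json, config.json, and the weights.
tokenizer.save_pretrained("gpt-neo-125M-plus-tokens")
model.save_pretrained("gpt-neo-125M-plus-tokens")
```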
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{" ": 50258, "\t\t": 50257, " ": 50260, " ": 50259}
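
The id assignments can be sanity-checked after reloading; a quick check against the mapping above (local path hypothetical, as in the sketch):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt-neo-125M-plus-tokens")
assert tokenizer.convert_tokens_to_ids("\t\t") == 50257
assert len(tokenizer) == 50261  # 50257 base ids + 4 added tokens
```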
config.json
CHANGED
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "EleutherAI/gpt-neo-125M",
   "activation_function": "gelu_new",
   "architectures": [
     "GPTNeoForCausalLM"
@@ -45,8 +46,9 @@
   "summary_proj_to_labels": true,
   "summary_type": "cls_index",
   "summary_use_proj": true,
+  "torch_dtype": "float32",
   "transformers_version": "4.9.0.dev0",
   "use_cache": true,
-  "vocab_size": 50257,
+  "vocab_size": 50261,
   "window_size": 256
 }
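
The vocab_size bump from 50257 to 50261 must be matched by the embedding (and tied output) matrix, which is what resize_token_embeddings does; a quick shape check, under the same hypothetical path:

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt-neo-125M-plus-tokens")
assert model.get_input_embeddings().weight.shape[0] == 50261
# The 4 new rows are freshly initialized; the model needs fine-tuning
# before the added whitespace tokens carry meaningful representations.
```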
flax_model.msgpack
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4c44cfab0c177dba64a78924e2aae587dc6a9a41c20cee3bf7bea6a3c386eac0
+size 500812408
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ae4052178f4c51012eb97a4fde84eab198c9505e3969bf344c452db340dd0405
+size 526027101
special_tokens_map.json
CHANGED
@@ -1 +1 @@
-{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
+{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "additional_special_tokens": ["\t\t", " ", " ", " "]}
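
Because the whitespace strings are registered as additional special tokens, the tokenizer matches each one atomically instead of splitting it through byte-level BPE; a small illustration, again under the hypothetical local path:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt-neo-125M-plus-tokens")
ids = tokenizer.encode("def f():\n\t\treturn 1")
assert 50257 in ids  # "\t\t" surfaces as a single added-token id
```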
tokenizer.json
CHANGED
The diff for this file is too large to render.
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "EleutherAI/gpt-neo-125M", "tokenizer_class": "GPT2Tokenizer"}
+{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "EleutherAI/gpt-neo-125M", "additional_special_tokens": ["\t\t", " ", " ", " "], "tokenizer_class": "GPT2Tokenizer"}
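
Finally, a commit like this one can be created directly from the first sketch above with push_to_hub (the repo id below is a placeholder):

```python
# Continuing from the first sketch: push the updated tokenizer files
# and the resized model weights to the Hub in named commits.
repo_id = "your-username/gpt-neo-125M-plus-tokens"
tokenizer.push_to_hub(repo_id, commit_message="Add new tokens to model")
model.push_to_hub(repo_id, commit_message="Add new tokens to model")
```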