quincyqiang committed on
Commit
c259ee3
1 Parent(s): 3ba55ac

merge 14GB

Browse files
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "/data/searchgpt/yq/GoGPT/llama2-7b-sft-filterd-v3",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
@@ -20,7 +20,7 @@
20
  "rope_scaling": null,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "float16",
23
- "transformers_version": "4.29.1",
24
  "use_cache": false,
25
  "vocab_size": 68420
26
  }
 
1
  {
2
+ "_name_or_path": "/data/searchgpt/yq/Firefly/output/llama2-7b-moss-sft/checkpoint-17000",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
 
20
  "rope_scaling": null,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "float16",
23
+ "transformers_version": "4.31.0",
24
  "use_cache": false,
25
  "vocab_size": 68420
26
  }
generation_config.json CHANGED
@@ -5,5 +5,5 @@
5
  "pad_token_id": 32000,
6
  "temperature": 0.9,
7
  "top_p": 0.6,
8
- "transformers_version": "4.29.1"
9
  }
 
5
  "pad_token_id": 32000,
6
  "temperature": 0.9,
7
  "top_p": 0.6,
8
+ "transformers_version": "4.31.0"
9
  }
pytorch_model-00001-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:92ddf1c25ce9522e1ad9b0961376596543ec6e062c0920f29dd83832a743d867
3
- size 14073577019
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:451e9fa08494310706144cec474237dfa2f4d58c7cb153b85d00bcbfdcbc1daf
3
+ size 9970884397
pytorch_model-00002-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:55d97ec2ae68a82f9f6e23a4d1ab06cd175142c9586fb3097a8e62f162719e65
3
- size 14073552151
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52181a67541f108853b196708f0c9a4693e0d375d4a37486af66fe992cff928e
3
+ size 4102775441
pytorch_model.bin.index.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "metadata": {
3
- "total_size": 14073540608
4
  },
5
  "weight_map": {
6
  "lm_head.weight": "pytorch_model-00002-of-00002.bin",
 
1
  {
2
  "metadata": {
3
+ "total_size": 14073544704
4
  },
5
  "weight_map": {
6
  "lm_head.weight": "pytorch_model-00002-of-00002.bin",
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -18,6 +18,7 @@
18
  "rstrip": false,
19
  "single_word": false
20
  },
 
21
  "model_max_length": 4096,
22
  "pad_token": null,
23
  "padding_side": "right",
 
18
  "rstrip": false,
19
  "single_word": false
20
  },
21
+ "legacy": true,
22
  "model_max_length": 4096,
23
  "pad_token": null,
24
  "padding_side": "right",