Heralax committed
Commit 1beed45 (0 parents)
.gitattributes ADDED
@@ -0,0 +1,2 @@
+ *.gguf filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
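These two attribute lines route every `*.gguf` and `*.bin` file through the Git LFS filter, so the repository stores tiny text pointer stubs instead of multi-gigabyte weight blobs. A minimal sketch of which files in this commit those patterns catch (the file list is taken from this commit; the script itself is illustrative, not part of it):

```python
# Illustrative: check which files from this commit match the
# LFS-tracked patterns declared in .gitattributes above.
from fnmatch import fnmatch

lfs_patterns = ["*.gguf", "*.bin"]
commit_files = [
    "Mistral-7B-hf-v0.2-F16.gguf",
    "ggml-model-Q8_0.gguf",
    "pytorch_model.bin",
    "config.json",
    "tokenizer.model",
]

for name in commit_files:
    tracked = any(fnmatch(name, pat) for pat in lfs_patterns)
    print(f"{name}: {'LFS pointer' if tracked else 'stored directly'}")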
Mistral-7B-hf-v0.2-F16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df7558007bc2a74c7cf8c1304d4f2ec02431e2d4f4a30898d5a6d00484fad58c
+ size 14484749280
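What the commit actually stores here is a Git LFS pointer, not the model: three `key value` lines giving the spec version, the SHA-256 of the real ~14.5 GB F16 GGUF, and its byte size (`git lfs pull` fetches the blob itself). A minimal parsing sketch, with the function name chosen here for illustration:

```python
# Illustrative sketch: parse a Git LFS pointer file into a dict.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:df7558007bc2a74c7cf8c1304d4f2ec02431e2d4f4a30898d5a6d00484fad58c
size 14484749280"""

info = parse_lfs_pointer(pointer)
print(info["oid"])                    # sha256:df7558...
print(int(info["size"]) / 1e9, "GB")  # ~14.5 GB
```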
README.md ADDED
@@ -0,0 +1,133 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: alpindale/Mistral-7B-v0.2-hf
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: army-pretraining
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.4.1`
+ ```yaml
+ base_model: alpindale/Mistral-7B-v0.2-hf
+ tokenizer_type: AutoTokenizer
+ is_mistral_derived_model: true
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: json
+     data_files: hidden_pretraining-us-army.jsonl
+     ds_type: json
+     type: completion
+
+ dataset_prepared_path: last_run_prepared
+ output_dir: ./army-pretraining
+
+ sequence_len: 4096
+ sample_packing: false
+ pad_to_sequence_len: true
+ shuffle_merged_datasets: true
+
+ wandb_project: mistral-army
+ wandb_entity:
+ wandb_watch:
+ wandb_run_id:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 6
+ micro_batch_size: 2
+ eval_batch_size: 1
+ num_epochs: 11
+ optimizer: paged_adamw_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.000020
+ weight_decay: 0
+ # Gradient clipping max norm
+ max_grad_norm: 1.0
+ noisy_embedding_alpha: 0
+ train_on_inputs: false
+ group_by_length: false
+ bf16: true
+ fp16: false
+ tf32: false
+
+ gradient_checkpointing: unsloth
+ early_stopping_patience:
+ resume_from_checkpoint:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ chat_template: chatml
+
+ warmup_ratio: 0.5
+ auto_resume_from_checkpoints: false
+ eval_steps: 10
+ saves_per_epoch: 1
+ eval_sample_packing: false
+ save_total_limit: 3
+ debug:
+ deepspeed: deepspeed_configs/zero2.json
+ special_tokens:
+   pad_token: "<|end_of_text|>"
+ ```
+
+ </details><br>
+
+ # army-pretraining
+
+ This model is a fine-tuned version of [alpindale/Mistral-7B-v0.2-hf](https://huggingface.co/alpindale/Mistral-7B-v0.2-hf), trained on the `hidden_pretraining-us-army.jsonl` completion dataset listed in the axolotl config above.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 2
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 6
+ - gradient_accumulation_steps: 6
+ - total_train_batch_size: 72
+ - total_eval_batch_size: 6
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 136
+ - num_epochs: 11
+
+ ### Training results
+
+ ### Framework versions
+
+ - Transformers 4.45.0.dev0
+ - Pytorch 2.3.1+cu121
+ - Datasets 2.21.0
+ - Tokenizers 0.19.1
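Two details in this card are worth spelling out. First, the reported `total_train_batch_size` of 72 is simply `micro_batch_size (2) × gradient_accumulation_steps (6) × num_devices (6)`. Second, the card gives no usage snippet; a minimal loading sketch follows, assuming the weights sit in the `./army-pretraining` output directory from the config (substitute the actual Hub repo id once published):

```python
# Minimal usage sketch (the local path is an assumption, not from the card).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "./army-pretraining"  # output_dir from the axolotl config above
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

# Training used raw completion data, so plain free-form generation applies.
inputs = tokenizer("The mission of the unit is", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```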
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|end_of_text|>": 32000
+ }
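This file records one token added on top of the base 32,000-entry Mistral vocabulary: id 32000 for the `<|end_of_text|>` pad token named in the training config, which is why `config.json` below reports `vocab_size: 32001`. A sketch of how such a mapping typically arises (illustrative, not the exact training script):

```python
# Illustrative: adding the pad token grows the vocab from 32000 to 32001,
# after which the embedding matrix must be resized to match.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("alpindale/Mistral-7B-v0.2-hf")
num_added = tokenizer.add_special_tokens({"pad_token": "<|end_of_text|>"})
print(num_added, tokenizer.pad_token_id)  # 1 32000

model = AutoModelForCausalLM.from_pretrained("alpindale/Mistral-7B-v0.2-hf")
model.resize_token_embeddings(len(tokenizer))  # embedding now has 32001 rows
```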
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "alpindale/Mistral-7B-v0.2-hf",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
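Several of these fields are linked: `head_dim` is `hidden_size / num_attention_heads` (4096 / 32 = 128), and `num_key_value_heads: 8` means grouped-query attention with 4 query heads sharing each key/value head. A quick sanity-check sketch:

```python
# Sanity-check the self-consistent attention geometry in config.json.
hidden_size = 4096
num_attention_heads = 32
num_key_value_heads = 8
head_dim = 128

assert hidden_size // num_attention_heads == head_dim  # 4096 / 32 = 128
assert num_attention_heads % num_key_value_heads == 0
print("GQA group size:", num_attention_heads // num_key_value_heads)  # 4
```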
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "do_sample": true,
+   "eos_token_id": 2,
+   "transformers_version": "4.45.0.dev0"
+ }
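With `do_sample: true` baked into this file, `model.generate()` samples by default for this checkpoint rather than decoding greedily; callers wanting deterministic output must override per call. A small illustrative sketch:

```python
# Illustrative: generation_config.json makes sampling the default here.
from transformers import GenerationConfig

gen_cfg = GenerationConfig(bos_token_id=1, eos_token_id=2, do_sample=True)
print(gen_cfg.do_sample)  # True: stochastic decoding unless overridden
# model.generate(**inputs, do_sample=False) would force greedy decoding.
```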
ggml-model-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:455c07896c6aa3dc5643fb96b412e8b77609b79d7c05d15f28e36c492fc1b9d2
+ size 7695867360
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecd2154159fc4698c039df67ed002da4fc3d2218352007be29bbf16402c10bb5
+ size 14483521198
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|end_of_text|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer.model ADDED
Binary file (493 kB).
tokenizer_config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<|end_of_text|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|end_of_text|>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
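The `chat_template` field stores a Jinja template that wraps each message in ChatML `<|im_start|>`/`<|im_end|>` markers, matching `chat_template: chatml` in the training config even though this run trained on raw completions. A sketch of rendering it (the local path is an assumption):

```python
# Illustrative: render the ChatML chat_template from tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./army-pretraining")
messages = [{"role": "user", "content": "Hello there."}]
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(text)
# <|im_start|>user
# Hello there.<|im_end|>
# <|im_start|>assistant
```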