FuturisticVibes committed on
Commit
0738600
1 Parent(s): 125c228

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. README.md +169 -0
  2. adapter_config.json +34 -0
  3. adapter_model.bin +3 -0
  4. added_tokens.json +5 -0
  5. checkpoint-150/README.md +202 -0
  6. checkpoint-150/adapter_config.json +34 -0
  7. checkpoint-150/adapter_model.safetensors +3 -0
  8. checkpoint-150/added_tokens.json +5 -0
  9. checkpoint-150/merges.txt +0 -0
  10. checkpoint-150/optimizer.pt +3 -0
  11. checkpoint-150/rng_state.pth +3 -0
  12. checkpoint-150/scheduler.pt +3 -0
  13. checkpoint-150/special_tokens_map.json +20 -0
  14. checkpoint-150/tokenizer_config.json +44 -0
  15. checkpoint-150/trainer_state.json +1131 -0
  16. checkpoint-150/training_args.bin +3 -0
  17. checkpoint-150/vocab.json +0 -0
  18. checkpoint-180/README.md +202 -0
  19. checkpoint-180/adapter_config.json +34 -0
  20. checkpoint-180/adapter_model.safetensors +3 -0
  21. checkpoint-180/added_tokens.json +5 -0
  22. checkpoint-180/merges.txt +0 -0
  23. checkpoint-180/optimizer.pt +3 -0
  24. checkpoint-180/rng_state.pth +3 -0
  25. checkpoint-180/scheduler.pt +3 -0
  26. checkpoint-180/special_tokens_map.json +20 -0
  27. checkpoint-180/tokenizer_config.json +44 -0
  28. checkpoint-180/trainer_state.json +1349 -0
  29. checkpoint-180/training_args.bin +3 -0
  30. checkpoint-180/vocab.json +0 -0
  31. checkpoint-210/README.md +202 -0
  32. checkpoint-210/adapter_config.json +34 -0
  33. checkpoint-210/adapter_model.safetensors +3 -0
  34. checkpoint-210/added_tokens.json +5 -0
  35. checkpoint-210/merges.txt +0 -0
  36. checkpoint-210/optimizer.pt +3 -0
  37. checkpoint-210/rng_state.pth +3 -0
  38. checkpoint-210/scheduler.pt +3 -0
  39. checkpoint-210/special_tokens_map.json +20 -0
  40. checkpoint-210/tokenizer_config.json +44 -0
  41. checkpoint-210/trainer_state.json +1567 -0
  42. checkpoint-210/training_args.bin +3 -0
  43. checkpoint-210/vocab.json +0 -0
  44. checkpoint-236/README.md +202 -0
  45. checkpoint-236/adapter_config.json +34 -0
  46. checkpoint-236/adapter_model.safetensors +3 -0
  47. checkpoint-236/added_tokens.json +5 -0
  48. checkpoint-236/merges.txt +0 -0
  49. checkpoint-236/optimizer.pt +3 -0
  50. checkpoint-236/rng_state.pth +3 -0
README.md ADDED
@@ -0,0 +1,169 @@
+ ---
+ base_model: Qwen/Qwen2-7B
+ #base_model: /workspace/data/models/Qwen2-7B
+ library_name: peft
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: workspace/data/outputs/Qwen2-7B-TestFinetune-LORA
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.4.1`
+ ```yaml
+ base_model: /workspace/data/models/Qwen2-7B
+ model_type: Qwen2ForCausalLM
+ tokenizer_type: Qwen2Tokenizer
+
+ trust_remote_code: true
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+ - path: NobodyExistsOnTheInternet/ToxicQAFinal
+ type: sharegpt
+ # - path: /workspace/data/SystemChat_filtered_sharegpt.jsonl
+ # type: sharegpt
+ # conversation: chatml
+ # - path: /workspace/data/Opus_Instruct-v2-6.5K-Filtered-v2.json
+ # type:
+ # field_system: system
+ # field_instruction: prompt
+ # field_output: response
+ # format: "[INST] {instruction} [/INST]"
+ # no_input_format: "[INST] {instruction} [/INST]"
+ # - path: Undi95/orthogonal-activation-steering-TOXIC
+ # type:
+ # field_instruction: goal
+ # field_output: target
+ # format: "[INST] {instruction} [/INST]"
+ # no_input_format: "[INST] {instruction} [/INST]"
+ # split: test
+ # - path: cognitivecomputations/WizardLM_alpaca_evol_instruct_70k_unfiltered
+ # type: alpaca
+ # split: train
+
+ dataset_prepared_path: /workspace/data/last_run_prepared
+ val_set_size: 0.15
+ output_dir: /workspace/data/outputs/Qwen2-7B-TestFinetune-LORA
+
+ chat_template: chatml
+
+ sequence_len: 8192
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ adapter: lora
+ lora_model_dir:
+ lora_r: 32
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 8
+ micro_batch_size: 1
+ num_epochs: 4
+ optimizer: adamw_torch
+ lr_scheduler: cosine
+ learning_rate: 3e-5
+
+ train_on_inputs: false
+ group_by_length: true
+ bf16: true
+ fp16: false
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ evals_per_epoch: 2
+ eval_table_size:
+ saves_per_epoch: 2
+ debug:
+ deepspeed:
+ weight_decay: 0.05
+ fsdp:
+ fsdp_config:
+ special_tokens:
+ pad_token: "<|endoftext|>"
+ eos_token: "<|im_end|>"
+ ```
+
+ </details><br>
+
+ # workspace/data/outputs/Qwen2-7B-TestFinetune-LORA
+
+ This model is a LoRA fine-tune of Qwen/Qwen2-7B on the NobodyExistsOnTheInternet/ToxicQAFinal dataset (see the axolotl config above).
+ It achieves the following results on the evaluation set:
+ - Loss: 1.0055
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 4
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 1.1751 | 0.0169 | 1 | 1.1860 |
+ | 1.1007 | 0.5063 | 30 | 1.0912 |
+ | 1.0418 | 1.0127 | 60 | 1.0428 |
+ | 1.0105 | 1.5042 | 90 | 1.0232 |
+ | 1.0082 | 2.0105 | 120 | 1.0127 |
+ | 0.9946 | 2.5042 | 150 | 1.0074 |
+ | 0.9826 | 3.0105 | 180 | 1.0057 |
+ | 0.9898 | 3.5021 | 210 | 1.0055 |
+
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.42.3
+ - Pytorch 2.1.2+cu118
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
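
The card's "How to Get Started" section is still a placeholder, so here is a minimal sketch of one plausible way to load this adapter with PEFT. The Hub repo id below is an assumption (this commit view does not show it); substitute the actual repo id or a local path to the uploaded folder.

```python
# Minimal sketch: attach the LoRA adapter to the public base model.
# NOTE: "FuturisticVibes/Qwen2-7B-TestFinetune-LORA" is a hypothetical repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-7B",             # base model named in the card's front matter
    torch_dtype=torch.bfloat16,  # training ran in bf16
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B")
model = PeftModel.from_pretrained(base, "FuturisticVibes/Qwen2-7B-TestFinetune-LORA")
model.eval()
```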
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/data/models/Qwen2-7B",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "down_proj",
+ "q_proj",
+ "gate_proj",
+ "v_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
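
For reference, the JSON above corresponds to roughly the following `peft.LoraConfig` — a sketch reconstructed from the fields, not code taken from this repo:

```python
from peft import LoraConfig

# Equivalent of adapter_config.json: rank-32 LoRA over every linear projection
# in the attention and MLP blocks (lora_target_linear: true in the axolotl config).
lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
```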
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8d74db78cede6a8d0f55f6e138c4d042f5e04e56667fad86df0f8b21da9e901
+ size 161622314
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-150/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /workspace/data/models/Qwen2-7B
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-150/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/data/models/Qwen2-7B",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "down_proj",
+ "q_proj",
+ "gate_proj",
+ "v_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
checkpoint-150/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d85c16b2611b53132f65205d8cc2cfea6dd4535a080257570c53d1bca38a406b
+ size 161533584
checkpoint-150/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-150/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-150/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af7fd609322fa7c6d8c35bdebd39fe0c9275a5845980d682fb787cde90bee591
+ size 323292010
checkpoint-150/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a99d4192b32fbb6795950b98e8cadf12766e63518543fb1e2091e7f45f3c5e77
+ size 14244
checkpoint-150/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3019b93b7cc8175e4ec5a34d768cfc964cc70a5a29c48f62e0f050568322c02f
+ size 1064
checkpoint-150/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-150/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null,
+ "use_fast": true
+ }
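
The `chat_template` above is plain ChatML. A short sketch of rendering a conversation with it, assuming the tokenizer is loaded from this checkpoint directory:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-150")  # local path assumption
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# add_generation_prompt=True appends the opening "<|im_start|>assistant\n" tag.
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```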
checkpoint-150/trainer_state.json ADDED
@@ -0,0 +1,1131 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.5042194092827,
+ "eval_steps": 30,
+ "global_step": 150,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.016877637130801686,
+ "grad_norm": 0.08447265625,
+ "learning_rate": 3e-06,
+ "loss": 1.1751,
+ "step": 1
+ },
+ {
+ "epoch": 0.016877637130801686,
+ "eval_loss": 1.185997486114502,
+ "eval_runtime": 72.8223,
+ "eval_samples_per_second": 14.144,
+ "eval_steps_per_second": 14.144,
+ "step": 1
+ },
+ {
+ "epoch": 0.03375527426160337,
+ "grad_norm": 0.08544921875,
+ "learning_rate": 6e-06,
+ "loss": 1.1683,
+ "step": 2
+ },
+ {
+ "epoch": 0.05063291139240506,
+ "grad_norm": 0.0830078125,
+ "learning_rate": 9e-06,
+ "loss": 1.1737,
+ "step": 3
+ },
+ {
+ "epoch": 0.06751054852320675,
+ "grad_norm": 0.08544921875,
+ "learning_rate": 1.2e-05,
+ "loss": 1.1889,
+ "step": 4
+ },
+ {
+ "epoch": 0.08438818565400844,
+ "grad_norm": 0.0849609375,
+ "learning_rate": 1.5e-05,
+ "loss": 1.1619,
+ "step": 5
+ },
+ {
+ "epoch": 0.10126582278481013,
+ "grad_norm": 0.0869140625,
+ "learning_rate": 1.8e-05,
+ "loss": 1.1815,
+ "step": 6
+ },
+ {
+ "epoch": 0.11814345991561181,
+ "grad_norm": 0.08447265625,
+ "learning_rate": 2.1e-05,
+ "loss": 1.1726,
+ "step": 7
+ },
+ {
+ "epoch": 0.1350210970464135,
+ "grad_norm": 0.08740234375,
+ "learning_rate": 2.4e-05,
+ "loss": 1.1701,
+ "step": 8
+ },
+ {
+ "epoch": 0.1518987341772152,
+ "grad_norm": 0.08642578125,
+ "learning_rate": 2.7000000000000002e-05,
+ "loss": 1.1818,
+ "step": 9
+ },
+ {
+ "epoch": 0.16877637130801687,
+ "grad_norm": 0.08935546875,
+ "learning_rate": 3e-05,
+ "loss": 1.1896,
+ "step": 10
+ },
+ {
+ "epoch": 0.18565400843881857,
+ "grad_norm": 0.0888671875,
+ "learning_rate": 2.999855077059572e-05,
+ "loss": 1.1873,
+ "step": 11
+ },
+ {
+ "epoch": 0.20253164556962025,
+ "grad_norm": 0.08984375,
+ "learning_rate": 2.9994203362418313e-05,
+ "loss": 1.1838,
+ "step": 12
+ },
+ {
+ "epoch": 0.21940928270042195,
+ "grad_norm": 0.0859375,
+ "learning_rate": 2.998695861552002e-05,
+ "loss": 1.1569,
+ "step": 13
+ },
+ {
+ "epoch": 0.23628691983122363,
+ "grad_norm": 0.08642578125,
+ "learning_rate": 2.9976817929807542e-05,
+ "loss": 1.1595,
+ "step": 14
+ },
+ {
+ "epoch": 0.25316455696202533,
+ "grad_norm": 0.078125,
+ "learning_rate": 2.996378326477153e-05,
+ "loss": 1.1348,
+ "step": 15
+ },
+ {
+ "epoch": 0.270042194092827,
+ "grad_norm": 0.07470703125,
+ "learning_rate": 2.9947857139107964e-05,
+ "loss": 1.1434,
+ "step": 16
+ },
+ {
+ "epoch": 0.2869198312236287,
+ "grad_norm": 0.140625,
+ "learning_rate": 2.992904263023146e-05,
+ "loss": 1.8213,
+ "step": 17
+ },
+ {
+ "epoch": 0.3037974683544304,
+ "grad_norm": 0.06787109375,
+ "learning_rate": 2.990734337368062e-05,
+ "loss": 1.1289,
+ "step": 18
+ },
+ {
+ "epoch": 0.3206751054852321,
+ "grad_norm": 0.0654296875,
+ "learning_rate": 2.9882763562415518e-05,
+ "loss": 1.1106,
+ "step": 19
+ },
+ {
+ "epoch": 0.33755274261603374,
+ "grad_norm": 0.06640625,
+ "learning_rate": 2.9855307946007532e-05,
+ "loss": 1.1381,
+ "step": 20
+ },
+ {
+ "epoch": 0.35443037974683544,
+ "grad_norm": 0.064453125,
+ "learning_rate": 2.982498182972154e-05,
+ "loss": 1.1192,
+ "step": 21
+ },
+ {
+ "epoch": 0.37130801687763715,
+ "grad_norm": 0.0615234375,
+ "learning_rate": 2.9791791073490795e-05,
+ "loss": 1.1105,
+ "step": 22
+ },
+ {
+ "epoch": 0.3881856540084388,
+ "grad_norm": 0.061767578125,
+ "learning_rate": 2.9755742090784617e-05,
+ "loss": 1.1207,
+ "step": 23
+ },
+ {
+ "epoch": 0.4050632911392405,
+ "grad_norm": 0.0634765625,
+ "learning_rate": 2.9716841847369106e-05,
+ "loss": 1.1083,
+ "step": 24
+ },
+ {
+ "epoch": 0.4219409282700422,
+ "grad_norm": 0.058349609375,
+ "learning_rate": 2.967509785996114e-05,
+ "loss": 1.1007,
+ "step": 25
+ },
+ {
+ "epoch": 0.4388185654008439,
+ "grad_norm": 0.0615234375,
+ "learning_rate": 2.963051819477592e-05,
+ "loss": 1.0842,
+ "step": 26
+ },
+ {
+ "epoch": 0.45569620253164556,
+ "grad_norm": 0.0625,
+ "learning_rate": 2.958311146596833e-05,
+ "loss": 1.0961,
+ "step": 27
+ },
+ {
+ "epoch": 0.47257383966244726,
+ "grad_norm": 0.05908203125,
+ "learning_rate": 2.953288683396841e-05,
+ "loss": 1.109,
+ "step": 28
+ },
+ {
+ "epoch": 0.48945147679324896,
+ "grad_norm": 0.061767578125,
+ "learning_rate": 2.9479854003711298e-05,
+ "loss": 1.0789,
+ "step": 29
+ },
+ {
+ "epoch": 0.5063291139240507,
+ "grad_norm": 0.06103515625,
+ "learning_rate": 2.9424023222761938e-05,
+ "loss": 1.1007,
+ "step": 30
+ },
+ {
+ "epoch": 0.5063291139240507,
+ "eval_loss": 1.091182827949524,
+ "eval_runtime": 74.0135,
+ "eval_samples_per_second": 13.916,
+ "eval_steps_per_second": 13.916,
+ "step": 30
+ },
+ {
+ "epoch": 0.5232067510548524,
+ "grad_norm": 0.059814453125,
+ "learning_rate": 2.9365405279334904e-05,
+ "loss": 1.0756,
+ "step": 31
+ },
+ {
+ "epoch": 0.540084388185654,
+ "grad_norm": 0.056396484375,
+ "learning_rate": 2.930401150020983e-05,
+ "loss": 1.0939,
+ "step": 32
+ },
+ {
+ "epoch": 0.5569620253164557,
+ "grad_norm": 0.05712890625,
+ "learning_rate": 2.9239853748542717e-05,
+ "loss": 1.0901,
+ "step": 33
+ },
+ {
+ "epoch": 0.5738396624472574,
+ "grad_norm": 0.054931640625,
+ "learning_rate": 2.9172944421573587e-05,
+ "loss": 1.0873,
+ "step": 34
+ },
+ {
+ "epoch": 0.5907172995780591,
+ "grad_norm": 0.0546875,
+ "learning_rate": 2.9103296448230986e-05,
+ "loss": 1.0584,
+ "step": 35
+ },
+ {
+ "epoch": 0.6075949367088608,
+ "grad_norm": 0.0556640625,
+ "learning_rate": 2.9030923286633703e-05,
+ "loss": 1.0692,
+ "step": 36
+ },
+ {
+ "epoch": 0.6244725738396625,
+ "grad_norm": 0.16015625,
+ "learning_rate": 2.8955838921490252e-05,
+ "loss": 1.782,
+ "step": 37
+ },
+ {
+ "epoch": 0.6413502109704642,
+ "grad_norm": 0.052001953125,
+ "learning_rate": 2.8878057861396606e-05,
+ "loss": 1.0667,
+ "step": 38
+ },
+ {
+ "epoch": 0.6582278481012658,
+ "grad_norm": 0.052001953125,
+ "learning_rate": 2.8797595136032675e-05,
+ "loss": 1.0656,
+ "step": 39
+ },
+ {
+ "epoch": 0.6751054852320675,
+ "grad_norm": 0.052734375,
+ "learning_rate": 2.8714466293258142e-05,
+ "loss": 1.0736,
+ "step": 40
+ },
+ {
+ "epoch": 0.6919831223628692,
+ "grad_norm": 0.053955078125,
+ "learning_rate": 2.8628687396108107e-05,
+ "loss": 1.0638,
+ "step": 41
+ },
+ {
+ "epoch": 0.7088607594936709,
+ "grad_norm": 0.05224609375,
+ "learning_rate": 2.8540275019689237e-05,
+ "loss": 1.0746,
+ "step": 42
+ },
+ {
+ "epoch": 0.7257383966244726,
+ "grad_norm": 0.051513671875,
+ "learning_rate": 2.8449246247976947e-05,
+ "loss": 1.0608,
+ "step": 43
+ },
+ {
+ "epoch": 0.7426160337552743,
+ "grad_norm": 0.052001953125,
+ "learning_rate": 2.835561867051426e-05,
+ "loss": 1.0619,
+ "step": 44
+ },
+ {
+ "epoch": 0.759493670886076,
+ "grad_norm": 0.051025390625,
+ "learning_rate": 2.825941037901294e-05,
+ "loss": 1.048,
+ "step": 45
+ },
+ {
+ "epoch": 0.7763713080168776,
+ "grad_norm": 0.05078125,
+ "learning_rate": 2.816063996385765e-05,
+ "loss": 1.0761,
+ "step": 46
+ },
+ {
+ "epoch": 0.7932489451476793,
+ "grad_norm": 0.0498046875,
+ "learning_rate": 2.805932651051372e-05,
+ "loss": 1.0443,
+ "step": 47
+ },
+ {
+ "epoch": 0.810126582278481,
+ "grad_norm": 0.051025390625,
+ "learning_rate": 2.7955489595839228e-05,
+ "loss": 1.0527,
+ "step": 48
+ },
+ {
+ "epoch": 0.8270042194092827,
+ "grad_norm": 0.052734375,
+ "learning_rate": 2.784914928430218e-05,
+ "loss": 1.0498,
+ "step": 49
+ },
+ {
+ "epoch": 0.8438818565400844,
+ "grad_norm": 0.049560546875,
+ "learning_rate": 2.7740326124103416e-05,
+ "loss": 1.0537,
+ "step": 50
+ },
+ {
+ "epoch": 0.8607594936708861,
+ "grad_norm": 0.053466796875,
+ "learning_rate": 2.762904114320609e-05,
+ "loss": 1.0326,
+ "step": 51
+ },
+ {
+ "epoch": 0.8776371308016878,
+ "grad_norm": 0.04931640625,
+ "learning_rate": 2.751531584527241e-05,
+ "loss": 1.043,
+ "step": 52
+ },
+ {
+ "epoch": 0.8945147679324894,
+ "grad_norm": 0.05126953125,
+ "learning_rate": 2.7399172205508476e-05,
+ "loss": 1.0463,
+ "step": 53
+ },
+ {
+ "epoch": 0.9113924050632911,
+ "grad_norm": 0.0517578125,
+ "learning_rate": 2.7280632666418013e-05,
+ "loss": 1.0476,
+ "step": 54
+ },
+ {
+ "epoch": 0.9282700421940928,
+ "grad_norm": 0.053466796875,
+ "learning_rate": 2.715972013346576e-05,
+ "loss": 1.0467,
+ "step": 55
+ },
+ {
+ "epoch": 0.9451476793248945,
+ "grad_norm": 0.0498046875,
+ "learning_rate": 2.703645797065147e-05,
+ "loss": 1.0467,
+ "step": 56
+ },
+ {
+ "epoch": 0.9620253164556962,
+ "grad_norm": 0.05078125,
+ "learning_rate": 2.6910869995995247e-05,
+ "loss": 1.05,
+ "step": 57
+ },
+ {
+ "epoch": 0.9789029535864979,
+ "grad_norm": 0.053955078125,
+ "learning_rate": 2.678298047693518e-05,
+ "loss": 1.0453,
+ "step": 58
+ },
+ {
+ "epoch": 0.9957805907172996,
+ "grad_norm": 0.050048828125,
+ "learning_rate": 2.6652814125638142e-05,
+ "loss": 1.0348,
+ "step": 59
+ },
+ {
+ "epoch": 1.0126582278481013,
+ "grad_norm": 0.052490234375,
+ "learning_rate": 2.652039609422463e-05,
+ "loss": 1.0418,
+ "step": 60
+ },
+ {
+ "epoch": 1.0126582278481013,
+ "eval_loss": 1.0428293943405151,
+ "eval_runtime": 74.0734,
+ "eval_samples_per_second": 13.905,
+ "eval_steps_per_second": 13.905,
+ "step": 60
+ },
+ {
+ "epoch": 1.0147679324894514,
+ "grad_norm": 0.0732421875,
+ "learning_rate": 2.638575196990862e-05,
+ "loss": 1.0194,
+ "step": 61
+ },
+ {
+ "epoch": 1.0316455696202531,
+ "grad_norm": 0.049560546875,
+ "learning_rate": 2.624890777005332e-05,
+ "loss": 1.0365,
+ "step": 62
+ },
+ {
+ "epoch": 1.0485232067510548,
+ "grad_norm": 0.05126953125,
+ "learning_rate": 2.6109889937143828e-05,
+ "loss": 1.0426,
+ "step": 63
+ },
+ {
+ "epoch": 1.0654008438818565,
+ "grad_norm": 0.05126953125,
+ "learning_rate": 2.5968725333677628e-05,
+ "loss": 1.043,
+ "step": 64
+ },
+ {
+ "epoch": 1.0822784810126582,
+ "grad_norm": 0.051513671875,
+ "learning_rate": 2.582544123697395e-05,
+ "loss": 1.0243,
+ "step": 65
+ },
+ {
+ "epoch": 1.09915611814346,
+ "grad_norm": 0.0517578125,
+ "learning_rate": 2.568006533390295e-05,
+ "loss": 1.0258,
+ "step": 66
+ },
+ {
+ "epoch": 1.1160337552742616,
+ "grad_norm": 0.050537109375,
+ "learning_rate": 2.5532625715535733e-05,
+ "loss": 1.0248,
+ "step": 67
+ },
+ {
+ "epoch": 1.1329113924050633,
+ "grad_norm": 0.0498046875,
+ "learning_rate": 2.5383150871716342e-05,
+ "loss": 1.0083,
+ "step": 68
+ },
+ {
+ "epoch": 1.149789029535865,
+ "grad_norm": 0.05224609375,
+ "learning_rate": 2.5231669685556636e-05,
+ "loss": 1.0207,
+ "step": 69
+ },
+ {
+ "epoch": 1.1666666666666667,
+ "grad_norm": 0.051513671875,
+ "learning_rate": 2.507821142785516e-05,
+ "loss": 1.0435,
+ "step": 70
+ },
+ {
+ "epoch": 1.1835443037974684,
+ "grad_norm": 0.05224609375,
+ "learning_rate": 2.4922805751441174e-05,
+ "loss": 1.0354,
+ "step": 71
+ },
+ {
+ "epoch": 1.20042194092827,
+ "grad_norm": 0.054443359375,
+ "learning_rate": 2.4765482685444786e-05,
+ "loss": 1.0266,
+ "step": 72
+ },
+ {
+ "epoch": 1.2172995780590719,
+ "grad_norm": 0.05126953125,
+ "learning_rate": 2.460627262949443e-05,
+ "loss": 1.0411,
+ "step": 73
+ },
+ {
+ "epoch": 1.2341772151898733,
+ "grad_norm": 0.05126953125,
+ "learning_rate": 2.4445206347842714e-05,
+ "loss": 1.0224,
+ "step": 74
+ },
+ {
+ "epoch": 1.251054852320675,
+ "grad_norm": 0.052978515625,
+ "learning_rate": 2.428231496342181e-05,
+ "loss": 1.0253,
+ "step": 75
+ },
+ {
+ "epoch": 1.2679324894514767,
+ "grad_norm": 0.05322265625,
+ "learning_rate": 2.4117629951829602e-05,
+ "loss": 1.0298,
+ "step": 76
+ },
+ {
+ "epoch": 1.2848101265822784,
+ "grad_norm": 0.052490234375,
+ "learning_rate": 2.395118313524758e-05,
+ "loss": 1.0239,
+ "step": 77
+ },
+ {
+ "epoch": 1.3016877637130801,
+ "grad_norm": 0.0537109375,
+ "learning_rate": 2.3783006676291866e-05,
+ "loss": 1.0212,
+ "step": 78
+ },
+ {
+ "epoch": 1.3185654008438819,
+ "grad_norm": 0.052734375,
+ "learning_rate": 2.361313307179837e-05,
+ "loss": 1.0371,
+ "step": 79
+ },
+ {
+ "epoch": 1.3354430379746836,
+ "grad_norm": 0.05126953125,
+ "learning_rate": 2.3441595146543458e-05,
+ "loss": 1.0314,
+ "step": 80
+ },
+ {
+ "epoch": 1.3523206751054853,
+ "grad_norm": 0.052001953125,
+ "learning_rate": 2.3268426046901153e-05,
+ "loss": 1.0195,
+ "step": 81
+ },
+ {
+ "epoch": 1.369198312236287,
+ "grad_norm": 0.052978515625,
+ "learning_rate": 2.3093659234438266e-05,
+ "loss": 1.0219,
+ "step": 82
+ },
+ {
+ "epoch": 1.3860759493670887,
+ "grad_norm": 0.054931640625,
+ "learning_rate": 2.291732847944861e-05,
+ "loss": 1.0293,
+ "step": 83
+ },
+ {
+ "epoch": 1.4029535864978904,
+ "grad_norm": 0.052734375,
+ "learning_rate": 2.2739467854427512e-05,
+ "loss": 0.9992,
+ "step": 84
+ },
+ {
+ "epoch": 1.4198312236286919,
+ "grad_norm": 0.05224609375,
+ "learning_rate": 2.2560111727488e-05,
+ "loss": 1.0254,
+ "step": 85
+ },
+ {
+ "epoch": 1.4367088607594938,
+ "grad_norm": 0.0546875,
+ "learning_rate": 2.237929475571979e-05,
+ "loss": 1.0148,
+ "step": 86
+ },
+ {
+ "epoch": 1.4535864978902953,
+ "grad_norm": 0.055419921875,
+ "learning_rate": 2.219705187849254e-05,
+ "loss": 1.0228,
+ "step": 87
+ },
+ {
+ "epoch": 1.4704641350210972,
+ "grad_norm": 0.05810546875,
+ "learning_rate": 2.2013418310704422e-05,
+ "loss": 1.021,
+ "step": 88
+ },
+ {
+ "epoch": 1.4873417721518987,
+ "grad_norm": 0.0537109375,
+ "learning_rate": 2.1828429535977585e-05,
+ "loss": 1.0352,
+ "step": 89
+ },
+ {
+ "epoch": 1.5042194092827004,
+ "grad_norm": 0.054931640625,
+ "learning_rate": 2.1642121299801594e-05,
+ "loss": 1.0105,
+ "step": 90
+ },
+ {
+ "epoch": 1.5042194092827004,
+ "eval_loss": 1.0232045650482178,
+ "eval_runtime": 74.0589,
+ "eval_samples_per_second": 13.908,
+ "eval_steps_per_second": 13.908,
+ "step": 90
+ },
+ {
+ "epoch": 1.521097046413502,
+ "grad_norm": 0.054931640625,
+ "learning_rate": 2.1454529602626336e-05,
+ "loss": 1.0051,
+ "step": 91
+ },
+ {
+ "epoch": 1.5379746835443038,
+ "grad_norm": 0.0546875,
+ "learning_rate": 2.126569069290562e-05,
+ "loss": 1.023,
+ "step": 92
+ },
+ {
+ "epoch": 1.5548523206751055,
+ "grad_norm": 0.0546875,
+ "learning_rate": 2.107564106009286e-05,
+ "loss": 1.012,
+ "step": 93
+ },
+ {
+ "epoch": 1.5717299578059072,
+ "grad_norm": 0.0546875,
+ "learning_rate": 2.0884417427590217e-05,
+ "loss": 1.0136,
+ "step": 94
+ },
+ {
+ "epoch": 1.5886075949367089,
+ "grad_norm": 0.0546875,
+ "learning_rate": 2.0692056745652483e-05,
+ "loss": 1.0194,
+ "step": 95
+ },
+ {
+ "epoch": 1.6054852320675106,
+ "grad_norm": 0.0556640625,
+ "learning_rate": 2.0498596184247196e-05,
+ "loss": 1.0089,
+ "step": 96
+ },
+ {
+ "epoch": 1.6223628691983123,
+ "grad_norm": 0.054931640625,
+ "learning_rate": 2.030407312587224e-05,
+ "loss": 1.0226,
+ "step": 97
+ },
+ {
+ "epoch": 1.6392405063291138,
+ "grad_norm": 0.058349609375,
+ "learning_rate": 2.010852515833242e-05,
+ "loss": 1.0219,
+ "step": 98
+ },
+ {
+ "epoch": 1.6561181434599157,
+ "grad_norm": 0.0556640625,
+ "learning_rate": 1.9911990067476336e-05,
+ "loss": 1.0035,
+ "step": 99
+ },
+ {
+ "epoch": 1.6729957805907172,
+ "grad_norm": 0.055908203125,
+ "learning_rate": 1.9714505829895004e-05,
+ "loss": 1.0052,
+ "step": 100
+ },
+ {
+ "epoch": 1.689873417721519,
+ "grad_norm": 0.05712890625,
+ "learning_rate": 1.951611060558363e-05,
+ "loss": 1.0175,
+ "step": 101
+ },
+ {
+ "epoch": 1.7067510548523206,
+ "grad_norm": 0.056396484375,
+ "learning_rate": 1.9316842730567902e-05,
+ "loss": 1.0099,
+ "step": 102
+ },
+ {
+ "epoch": 1.7236286919831225,
+ "grad_norm": 0.05615234375,
+ "learning_rate": 1.9116740709496334e-05,
+ "loss": 0.998,
+ "step": 103
+ },
+ {
+ "epoch": 1.740506329113924,
+ "grad_norm": 0.05615234375,
+ "learning_rate": 1.8915843208199967e-05,
+ "loss": 0.996,
+ "step": 104
+ },
+ {
+ "epoch": 1.7573839662447257,
+ "grad_norm": 0.058837890625,
+ "learning_rate": 1.8714189046220946e-05,
+ "loss": 1.009,
+ "step": 105
+ },
+ {
+ "epoch": 1.7742616033755274,
+ "grad_norm": 0.059326171875,
+ "learning_rate": 1.851181718931141e-05,
+ "loss": 1.0127,
+ "step": 106
+ },
+ {
+ "epoch": 1.7911392405063291,
+ "grad_norm": 0.0556640625,
+ "learning_rate": 1.830876674190411e-05,
+ "loss": 1.0107,
+ "step": 107
+ },
+ {
+ "epoch": 1.8080168776371308,
+ "grad_norm": 0.06201171875,
+ "learning_rate": 1.8105076939556238e-05,
+ "loss": 1.0264,
+ "step": 108
+ },
+ {
+ "epoch": 1.8248945147679325,
+ "grad_norm": 0.056884765625,
+ "learning_rate": 1.790078714136792e-05,
+ "loss": 1.0068,
+ "step": 109
+ },
+ {
+ "epoch": 1.8417721518987342,
+ "grad_norm": 0.056640625,
+ "learning_rate": 1.769593682237682e-05,
+ "loss": 1.0094,
+ "step": 110
+ },
+ {
+ "epoch": 1.8586497890295357,
+ "grad_norm": 0.057861328125,
+ "learning_rate": 1.7490565565930382e-05,
+ "loss": 1.0135,
+ "step": 111
+ },
+ {
+ "epoch": 1.8755274261603376,
+ "grad_norm": 0.05859375,
+ "learning_rate": 1.7284713056037074e-05,
+ "loss": 0.993,
+ "step": 112
+ },
+ {
+ "epoch": 1.8924050632911391,
+ "grad_norm": 0.056884765625,
+ "learning_rate": 1.7078419069698283e-05,
+ "loss": 1.015,
+ "step": 113
+ },
+ {
+ "epoch": 1.909282700421941,
+ "grad_norm": 0.055908203125,
+ "learning_rate": 1.687172346922213e-05,
+ "loss": 1.0043,
+ "step": 114
+ },
+ {
+ "epoch": 1.9261603375527425,
+ "grad_norm": 0.05859375,
+ "learning_rate": 1.6664666194520873e-05,
+ "loss": 0.9959,
+ "step": 115
+ },
+ {
+ "epoch": 1.9430379746835444,
+ "grad_norm": 0.0625,
+ "learning_rate": 1.645728725539329e-05,
+ "loss": 1.0177,
+ "step": 116
+ },
+ {
+ "epoch": 1.959915611814346,
+ "grad_norm": 0.05810546875,
+ "learning_rate": 1.6249626723793572e-05,
+ "loss": 1.033,
+ "step": 117
+ },
+ {
+ "epoch": 1.9767932489451476,
+ "grad_norm": 0.059326171875,
+ "learning_rate": 1.6041724726088187e-05,
+ "loss": 1.0155,
+ "step": 118
+ },
+ {
+ "epoch": 1.9936708860759493,
+ "grad_norm": 0.060791015625,
+ "learning_rate": 1.5833621435302247e-05,
+ "loss": 1.0167,
+ "step": 119
+ },
+ {
+ "epoch": 2.010548523206751,
+ "grad_norm": 0.0595703125,
+ "learning_rate": 1.5625357063356825e-05,
+ "loss": 1.0082,
+ "step": 120
+ },
+ {
+ "epoch": 2.010548523206751,
+ "eval_loss": 1.0127062797546387,
+ "eval_runtime": 74.0711,
+ "eval_samples_per_second": 13.906,
+ "eval_steps_per_second": 13.906,
+ "step": 120
+ },
+ {
+ "epoch": 2.0147679324894514,
+ "grad_norm": 0.1865234375,
+ "learning_rate": 1.541697185329881e-05,
+ "loss": 1.6592,
+ "step": 121
+ },
+ {
+ "epoch": 2.0316455696202533,
+ "grad_norm": 0.0595703125,
+ "learning_rate": 1.5208506071524727e-05,
+ "loss": 1.0041,
+ "step": 122
+ },
+ {
+ "epoch": 2.048523206751055,
+ "grad_norm": 0.059326171875,
+ "learning_rate": 1.5e-05,
+ "loss": 1.0116,
+ "step": 123
+ },
+ {
+ "epoch": 2.0654008438818567,
+ "grad_norm": 0.05859375,
+ "learning_rate": 1.4791493928475275e-05,
+ "loss": 1.0026,
+ "step": 124
+ },
+ {
+ "epoch": 2.0822784810126582,
+ "grad_norm": 0.05810546875,
+ "learning_rate": 1.4583028146701191e-05,
+ "loss": 1.0122,
+ "step": 125
+ },
+ {
+ "epoch": 2.0991561181434597,
+ "grad_norm": 0.06005859375,
+ "learning_rate": 1.437464293664318e-05,
+ "loss": 1.0024,
+ "step": 126
+ },
+ {
+ "epoch": 2.1160337552742616,
+ "grad_norm": 0.060546875,
+ "learning_rate": 1.4166378564697757e-05,
+ "loss": 1.0092,
+ "step": 127
+ },
+ {
+ "epoch": 2.132911392405063,
+ "grad_norm": 0.05908203125,
+ "learning_rate": 1.3958275273911812e-05,
+ "loss": 1.0048,
+ "step": 128
+ },
+ {
+ "epoch": 2.149789029535865,
+ "grad_norm": 0.058837890625,
+ "learning_rate": 1.375037327620643e-05,
+ "loss": 0.9921,
+ "step": 129
+ },
+ {
+ "epoch": 2.1666666666666665,
+ "grad_norm": 0.06005859375,
+ "learning_rate": 1.3542712744606712e-05,
+ "loss": 1.0117,
+ "step": 130
+ },
+ {
+ "epoch": 2.1835443037974684,
+ "grad_norm": 0.05859375,
+ "learning_rate": 1.3335333805479126e-05,
+ "loss": 0.9965,
+ "step": 131
+ },
+ {
+ "epoch": 2.20042194092827,
+ "grad_norm": 0.059814453125,
+ "learning_rate": 1.3128276530777874e-05,
+ "loss": 1.0108,
+ "step": 132
+ },
+ {
+ "epoch": 2.217299578059072,
+ "grad_norm": 0.060791015625,
+ "learning_rate": 1.292158093030172e-05,
+ "loss": 1.0029,
+ "step": 133
+ },
+ {
+ "epoch": 2.2341772151898733,
+ "grad_norm": 0.058349609375,
+ "learning_rate": 1.2715286943962925e-05,
+ "loss": 0.9958,
+ "step": 134
+ },
+ {
+ "epoch": 2.2510548523206753,
+ "grad_norm": 0.05908203125,
+ "learning_rate": 1.2509434434069625e-05,
+ "loss": 1.0054,
+ "step": 135
+ },
+ {
+ "epoch": 2.2679324894514767,
+ "grad_norm": 0.0576171875,
+ "learning_rate": 1.2304063177623182e-05,
+ "loss": 0.9837,
+ "step": 136
+ },
+ {
+ "epoch": 2.2848101265822787,
+ "grad_norm": 0.060302734375,
+ "learning_rate": 1.2099212858632083e-05,
+ "loss": 1.0014,
+ "step": 137
+ },
+ {
+ "epoch": 2.30168776371308,
+ "grad_norm": 0.0595703125,
+ "learning_rate": 1.1894923060443763e-05,
+ "loss": 0.9816,
+ "step": 138
+ },
+ {
+ "epoch": 2.318565400843882,
+ "grad_norm": 0.0595703125,
+ "learning_rate": 1.169123325809589e-05,
+ "loss": 1.0021,
+ "step": 139
+ },
+ {
+ "epoch": 2.3354430379746836,
+ "grad_norm": 0.05859375,
+ "learning_rate": 1.1488182810688593e-05,
+ "loss": 1.0005,
+ "step": 140
+ },
+ {
+ "epoch": 2.352320675105485,
+ "grad_norm": 0.06103515625,
+ "learning_rate": 1.1285810953779057e-05,
+ "loss": 1.0167,
+ "step": 141
+ },
+ {
+ "epoch": 2.369198312236287,
+ "grad_norm": 0.059814453125,
+ "learning_rate": 1.1084156791800036e-05,
+ "loss": 1.0146,
+ "step": 142
+ },
+ {
+ "epoch": 2.3860759493670884,
+ "grad_norm": 0.05859375,
+ "learning_rate": 1.0883259290503665e-05,
+ "loss": 1.0024,
+ "step": 143
+ },
+ {
+ "epoch": 2.4029535864978904,
+ "grad_norm": 0.0576171875,
+ "learning_rate": 1.0683157269432097e-05,
+ "loss": 0.9959,
+ "step": 144
+ },
+ {
+ "epoch": 2.419831223628692,
+ "grad_norm": 0.060546875,
+ "learning_rate": 1.0483889394416373e-05,
+ "loss": 1.0059,
+ "step": 145
+ },
+ {
+ "epoch": 2.4367088607594938,
+ "grad_norm": 0.06396484375,
+ "learning_rate": 1.0285494170104996e-05,
+ "loss": 0.983,
+ "step": 146
+ },
+ {
+ "epoch": 2.4535864978902953,
+ "grad_norm": 0.06005859375,
+ "learning_rate": 1.0088009932523664e-05,
+ "loss": 1.0116,
+ "step": 147
+ },
+ {
+ "epoch": 2.470464135021097,
+ "grad_norm": 0.061279296875,
+ "learning_rate": 9.891474841667585e-06,
+ "loss": 1.0136,
+ "step": 148
+ },
+ {
+ "epoch": 2.4873417721518987,
+ "grad_norm": 0.057861328125,
+ "learning_rate": 9.695926874127765e-06,
+ "loss": 0.9937,
+ "step": 149
+ },
+ {
+ "epoch": 2.5042194092827,
+ "grad_norm": 0.0595703125,
+ "learning_rate": 9.501403815752813e-06,
+ "loss": 0.9946,
+ "step": 150
+ },
+ {
+ "epoch": 2.5042194092827,
+ "eval_loss": 1.0074498653411865,
+ "eval_runtime": 74.0482,
+ "eval_samples_per_second": 13.91,
+ "eval_steps_per_second": 13.91,
+ "step": 150
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 236,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 30,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 4.218043458650112e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
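
`log_history` interleaves per-step training records with periodic eval records (every 30 steps here). A short sketch, assuming a local copy of this file, for recovering the loss curves:

```python
import json

with open("checkpoint-150/trainer_state.json") as f:  # local path assumption
    state = json.load(f)

# Train entries carry "loss"; eval entries carry "eval_loss" instead.
train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(eval_loss)  # [(1, 1.1859...), (30, 1.0911...), (60, 1.0428...), (90, 1.0232...), ...]
```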
checkpoint-150/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47fe3d8f86dd99270fe973ee57cf8bc56524ac1c04eb16ea3572e34a069a173f
+ size 6072
checkpoint-150/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-180/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /workspace/data/models/Qwen2-7B
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-180/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "/workspace/data/models/Qwen2-7B",
5
+ "bias": "none",
6
+ "fan_in_fan_out": null,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 16,
14
+ "lora_dropout": 0.05,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 32,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "k_proj",
24
+ "up_proj",
25
+ "down_proj",
26
+ "q_proj",
27
+ "gate_proj",
28
+ "v_proj",
29
+ "o_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
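
This adapter_config.json fully determines how the LoRA weights attach to the base model: rank-32, alpha-16 adapters with 0.05 dropout on all seven attention and MLP projections of Qwen2-7B. A minimal loading sketch, assuming `./checkpoint-180` is a local copy of this folder and that the public `Qwen/Qwen2-7B` checkpoint stands in for the private `/workspace/data/models/Qwen2-7B` path recorded in the config:

```python
# Minimal sketch: attach the checkpoint-180 LoRA adapter to the base model.
# Assumptions: ./checkpoint-180 is a local copy of this folder, and
# "Qwen/Qwen2-7B" matches the private /workspace/data/models/Qwen2-7B base.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-7B",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "./checkpoint-180")
tokenizer = AutoTokenizer.from_pretrained("./checkpoint-180")
```

Calling `model.merge_and_unload()` afterwards would fold the adapter into the base weights if a standalone model is preferred.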
checkpoint-180/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a933cf7ed41256d49bccda4b1153f6affe2986053033092840b95fa48d9df31e
+ size 161533584
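
Note that what the diff shows here is not the adapter itself but a Git LFS pointer: the repository tracks only the sha256 and byte size (~162 MB), while the real `adapter_model.safetensors` lives in LFS storage. A sketch of resolving the pointer with `huggingface_hub`, where the repo id below is a guess based on this upload, not something the diff confirms:

```python
# Sketch: resolve the LFS pointer to the real safetensors file.
# The repo_id below is an assumption, not confirmed by the diff.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="FuturisticVibes/Qwen2-7B-TestFinetune-LORA",  # hypothetical
    filename="checkpoint-180/adapter_model.safetensors",
)
print(local_path)  # cached path to the 161,533,584-byte file
```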
checkpoint-180/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-180/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-180/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be1dc869bec2cf0aa1ad33c3eeb39c8a620aba198b8c5ebb4e7bb24833ead283
+ size 323292010
checkpoint-180/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3781185f0ef813c9c8a67f316a3f390d519f89baede75c3072348102493359e
+ size 14244
checkpoint-180/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f9a133418374b5458f322f3eadfaa2df68068d4d299ddea0c4f82fc6655354b
+ size 1064
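
`optimizer.pt`, `rng_state.pth` and `scheduler.pt` carry the optimizer moments, RNG streams and learning-rate-scheduler state that let `transformers.Trainer` resume training exactly where this checkpoint stopped (via `trainer.train(resume_from_checkpoint=...)`). The scheduler state is small enough to inspect directly; a sketch, assuming a local copy of the folder:

```python
# Sketch: peek at the saved learning-rate scheduler state.
# torch.load works here because scheduler.pt is a plain pickled state dict.
import torch

sched_state = torch.load("checkpoint-180/scheduler.pt")  # hypothetical local path
print(sched_state)  # e.g. last_epoch and _last_lr entries for the decaying LR schedule
```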
checkpoint-180/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-180/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null,
+ "use_fast": true
+ }
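
The `chat_template` field above is plain ChatML: every message is wrapped in `<|im_start|>role ... <|im_end|>` markers, and `add_generation_prompt` appends an opening assistant tag. A short sketch of the template through the standard `transformers` API, assuming a local copy of the checkpoint:

```python
# Sketch: render the ChatML template defined in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-180")  # hypothetical local path
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# -> <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n
#    <|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant\n
```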
checkpoint-180/trainer_state.json ADDED
@@ -0,0 +1,1349 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.010548523206751,
+ "eval_steps": 30,
+ "global_step": 180,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {"epoch": 0.016877637130801686, "grad_norm": 0.08447265625, "learning_rate": 3e-06, "loss": 1.1751, "step": 1},
+ {"epoch": 0.016877637130801686, "eval_loss": 1.185997486114502, "eval_runtime": 72.8223, "eval_samples_per_second": 14.144, "eval_steps_per_second": 14.144, "step": 1},
+ {"epoch": 0.03375527426160337, "grad_norm": 0.08544921875, "learning_rate": 6e-06, "loss": 1.1683, "step": 2},
+ {"epoch": 0.05063291139240506, "grad_norm": 0.0830078125, "learning_rate": 9e-06, "loss": 1.1737, "step": 3},
+ {"epoch": 0.06751054852320675, "grad_norm": 0.08544921875, "learning_rate": 1.2e-05, "loss": 1.1889, "step": 4},
+ {"epoch": 0.08438818565400844, "grad_norm": 0.0849609375, "learning_rate": 1.5e-05, "loss": 1.1619, "step": 5},
+ {"epoch": 0.10126582278481013, "grad_norm": 0.0869140625, "learning_rate": 1.8e-05, "loss": 1.1815, "step": 6},
+ {"epoch": 0.11814345991561181, "grad_norm": 0.08447265625, "learning_rate": 2.1e-05, "loss": 1.1726, "step": 7},
+ {"epoch": 0.1350210970464135, "grad_norm": 0.08740234375, "learning_rate": 2.4e-05, "loss": 1.1701, "step": 8},
+ {"epoch": 0.1518987341772152, "grad_norm": 0.08642578125, "learning_rate": 2.7000000000000002e-05, "loss": 1.1818, "step": 9},
+ {"epoch": 0.16877637130801687, "grad_norm": 0.08935546875, "learning_rate": 3e-05, "loss": 1.1896, "step": 10},
+ {"epoch": 0.18565400843881857, "grad_norm": 0.0888671875, "learning_rate": 2.999855077059572e-05, "loss": 1.1873, "step": 11},
+ {"epoch": 0.20253164556962025, "grad_norm": 0.08984375, "learning_rate": 2.9994203362418313e-05, "loss": 1.1838, "step": 12},
+ {"epoch": 0.21940928270042195, "grad_norm": 0.0859375, "learning_rate": 2.998695861552002e-05, "loss": 1.1569, "step": 13},
+ {"epoch": 0.23628691983122363, "grad_norm": 0.08642578125, "learning_rate": 2.9976817929807542e-05, "loss": 1.1595, "step": 14},
+ {"epoch": 0.25316455696202533, "grad_norm": 0.078125, "learning_rate": 2.996378326477153e-05, "loss": 1.1348, "step": 15},
+ {"epoch": 0.270042194092827, "grad_norm": 0.07470703125, "learning_rate": 2.9947857139107964e-05, "loss": 1.1434, "step": 16},
+ {"epoch": 0.2869198312236287, "grad_norm": 0.140625, "learning_rate": 2.992904263023146e-05, "loss": 1.8213, "step": 17},
+ {"epoch": 0.3037974683544304, "grad_norm": 0.06787109375, "learning_rate": 2.990734337368062e-05, "loss": 1.1289, "step": 18},
+ {"epoch": 0.3206751054852321, "grad_norm": 0.0654296875, "learning_rate": 2.9882763562415518e-05, "loss": 1.1106, "step": 19},
+ {"epoch": 0.33755274261603374, "grad_norm": 0.06640625, "learning_rate": 2.9855307946007532e-05, "loss": 1.1381, "step": 20},
+ {"epoch": 0.35443037974683544, "grad_norm": 0.064453125, "learning_rate": 2.982498182972154e-05, "loss": 1.1192, "step": 21},
+ {"epoch": 0.37130801687763715, "grad_norm": 0.0615234375, "learning_rate": 2.9791791073490795e-05, "loss": 1.1105, "step": 22},
+ {"epoch": 0.3881856540084388, "grad_norm": 0.061767578125, "learning_rate": 2.9755742090784617e-05, "loss": 1.1207, "step": 23},
+ {"epoch": 0.4050632911392405, "grad_norm": 0.0634765625, "learning_rate": 2.9716841847369106e-05, "loss": 1.1083, "step": 24},
+ {"epoch": 0.4219409282700422, "grad_norm": 0.058349609375, "learning_rate": 2.967509785996114e-05, "loss": 1.1007, "step": 25},
+ {"epoch": 0.4388185654008439, "grad_norm": 0.0615234375, "learning_rate": 2.963051819477592e-05, "loss": 1.0842, "step": 26},
+ {"epoch": 0.45569620253164556, "grad_norm": 0.0625, "learning_rate": 2.958311146596833e-05, "loss": 1.0961, "step": 27},
+ {"epoch": 0.47257383966244726, "grad_norm": 0.05908203125, "learning_rate": 2.953288683396841e-05, "loss": 1.109, "step": 28},
+ {"epoch": 0.48945147679324896, "grad_norm": 0.061767578125, "learning_rate": 2.9479854003711298e-05, "loss": 1.0789, "step": 29},
+ {"epoch": 0.5063291139240507, "grad_norm": 0.06103515625, "learning_rate": 2.9424023222761938e-05, "loss": 1.1007, "step": 30},
+ {"epoch": 0.5063291139240507, "eval_loss": 1.091182827949524, "eval_runtime": 74.0135, "eval_samples_per_second": 13.916, "eval_steps_per_second": 13.916, "step": 30},
+ {"epoch": 0.5232067510548524, "grad_norm": 0.059814453125, "learning_rate": 2.9365405279334904e-05, "loss": 1.0756, "step": 31},
+ {"epoch": 0.540084388185654, "grad_norm": 0.056396484375, "learning_rate": 2.930401150020983e-05, "loss": 1.0939, "step": 32},
+ {"epoch": 0.5569620253164557, "grad_norm": 0.05712890625, "learning_rate": 2.9239853748542717e-05, "loss": 1.0901, "step": 33},
+ {"epoch": 0.5738396624472574, "grad_norm": 0.054931640625, "learning_rate": 2.9172944421573587e-05, "loss": 1.0873, "step": 34},
+ {"epoch": 0.5907172995780591, "grad_norm": 0.0546875, "learning_rate": 2.9103296448230986e-05, "loss": 1.0584, "step": 35},
+ {"epoch": 0.6075949367088608, "grad_norm": 0.0556640625, "learning_rate": 2.9030923286633703e-05, "loss": 1.0692, "step": 36},
+ {"epoch": 0.6244725738396625, "grad_norm": 0.16015625, "learning_rate": 2.8955838921490252e-05, "loss": 1.782, "step": 37},
+ {"epoch": 0.6413502109704642, "grad_norm": 0.052001953125, "learning_rate": 2.8878057861396606e-05, "loss": 1.0667, "step": 38},
+ {"epoch": 0.6582278481012658, "grad_norm": 0.052001953125, "learning_rate": 2.8797595136032675e-05, "loss": 1.0656, "step": 39},
+ {"epoch": 0.6751054852320675, "grad_norm": 0.052734375, "learning_rate": 2.8714466293258142e-05, "loss": 1.0736, "step": 40},
+ {"epoch": 0.6919831223628692, "grad_norm": 0.053955078125, "learning_rate": 2.8628687396108107e-05, "loss": 1.0638, "step": 41},
+ {"epoch": 0.7088607594936709, "grad_norm": 0.05224609375, "learning_rate": 2.8540275019689237e-05, "loss": 1.0746, "step": 42},
+ {"epoch": 0.7257383966244726, "grad_norm": 0.051513671875, "learning_rate": 2.8449246247976947e-05, "loss": 1.0608, "step": 43},
+ {"epoch": 0.7426160337552743, "grad_norm": 0.052001953125, "learning_rate": 2.835561867051426e-05, "loss": 1.0619, "step": 44},
+ {"epoch": 0.759493670886076, "grad_norm": 0.051025390625, "learning_rate": 2.825941037901294e-05, "loss": 1.048, "step": 45},
+ {"epoch": 0.7763713080168776, "grad_norm": 0.05078125, "learning_rate": 2.816063996385765e-05, "loss": 1.0761, "step": 46},
+ {"epoch": 0.7932489451476793, "grad_norm": 0.0498046875, "learning_rate": 2.805932651051372e-05, "loss": 1.0443, "step": 47},
+ {"epoch": 0.810126582278481, "grad_norm": 0.051025390625, "learning_rate": 2.7955489595839228e-05, "loss": 1.0527, "step": 48},
+ {"epoch": 0.8270042194092827, "grad_norm": 0.052734375, "learning_rate": 2.784914928430218e-05, "loss": 1.0498, "step": 49},
+ {"epoch": 0.8438818565400844, "grad_norm": 0.049560546875, "learning_rate": 2.7740326124103416e-05, "loss": 1.0537, "step": 50},
+ {"epoch": 0.8607594936708861, "grad_norm": 0.053466796875, "learning_rate": 2.762904114320609e-05, "loss": 1.0326, "step": 51},
+ {"epoch": 0.8776371308016878, "grad_norm": 0.04931640625, "learning_rate": 2.751531584527241e-05, "loss": 1.043, "step": 52},
+ {"epoch": 0.8945147679324894, "grad_norm": 0.05126953125, "learning_rate": 2.7399172205508476e-05, "loss": 1.0463, "step": 53},
+ {"epoch": 0.9113924050632911, "grad_norm": 0.0517578125, "learning_rate": 2.7280632666418013e-05, "loss": 1.0476, "step": 54},
+ {"epoch": 0.9282700421940928, "grad_norm": 0.053466796875, "learning_rate": 2.715972013346576e-05, "loss": 1.0467, "step": 55},
+ {"epoch": 0.9451476793248945, "grad_norm": 0.0498046875, "learning_rate": 2.703645797065147e-05, "loss": 1.0467, "step": 56},
+ {"epoch": 0.9620253164556962, "grad_norm": 0.05078125, "learning_rate": 2.6910869995995247e-05, "loss": 1.05, "step": 57},
+ {"epoch": 0.9789029535864979, "grad_norm": 0.053955078125, "learning_rate": 2.678298047693518e-05, "loss": 1.0453, "step": 58},
+ {"epoch": 0.9957805907172996, "grad_norm": 0.050048828125, "learning_rate": 2.6652814125638142e-05, "loss": 1.0348, "step": 59},
+ {"epoch": 1.0126582278481013, "grad_norm": 0.052490234375, "learning_rate": 2.652039609422463e-05, "loss": 1.0418, "step": 60},
+ {"epoch": 1.0126582278481013, "eval_loss": 1.0428293943405151, "eval_runtime": 74.0734, "eval_samples_per_second": 13.905, "eval_steps_per_second": 13.905, "step": 60},
+ {"epoch": 1.0147679324894514, "grad_norm": 0.0732421875, "learning_rate": 2.638575196990862e-05, "loss": 1.0194, "step": 61},
+ {"epoch": 1.0316455696202531, "grad_norm": 0.049560546875, "learning_rate": 2.624890777005332e-05, "loss": 1.0365, "step": 62},
+ {"epoch": 1.0485232067510548, "grad_norm": 0.05126953125, "learning_rate": 2.6109889937143828e-05, "loss": 1.0426, "step": 63},
+ {"epoch": 1.0654008438818565, "grad_norm": 0.05126953125, "learning_rate": 2.5968725333677628e-05, "loss": 1.043, "step": 64},
+ {"epoch": 1.0822784810126582, "grad_norm": 0.051513671875, "learning_rate": 2.582544123697395e-05, "loss": 1.0243, "step": 65},
+ {"epoch": 1.09915611814346, "grad_norm": 0.0517578125, "learning_rate": 2.568006533390295e-05, "loss": 1.0258, "step": 66},
+ {"epoch": 1.1160337552742616, "grad_norm": 0.050537109375, "learning_rate": 2.5532625715535733e-05, "loss": 1.0248, "step": 67},
+ {"epoch": 1.1329113924050633, "grad_norm": 0.0498046875, "learning_rate": 2.5383150871716342e-05, "loss": 1.0083, "step": 68},
+ {"epoch": 1.149789029535865, "grad_norm": 0.05224609375, "learning_rate": 2.5231669685556636e-05, "loss": 1.0207, "step": 69},
+ {"epoch": 1.1666666666666667, "grad_norm": 0.051513671875, "learning_rate": 2.507821142785516e-05, "loss": 1.0435, "step": 70},
+ {"epoch": 1.1835443037974684, "grad_norm": 0.05224609375, "learning_rate": 2.4922805751441174e-05, "loss": 1.0354, "step": 71},
+ {"epoch": 1.20042194092827, "grad_norm": 0.054443359375, "learning_rate": 2.4765482685444786e-05, "loss": 1.0266, "step": 72},
+ {"epoch": 1.2172995780590719, "grad_norm": 0.05126953125, "learning_rate": 2.460627262949443e-05, "loss": 1.0411, "step": 73},
+ {"epoch": 1.2341772151898733, "grad_norm": 0.05126953125, "learning_rate": 2.4445206347842714e-05, "loss": 1.0224, "step": 74},
+ {"epoch": 1.251054852320675, "grad_norm": 0.052978515625, "learning_rate": 2.428231496342181e-05, "loss": 1.0253, "step": 75},
+ {"epoch": 1.2679324894514767, "grad_norm": 0.05322265625, "learning_rate": 2.4117629951829602e-05, "loss": 1.0298, "step": 76},
+ {"epoch": 1.2848101265822784, "grad_norm": 0.052490234375, "learning_rate": 2.395118313524758e-05, "loss": 1.0239, "step": 77},
+ {"epoch": 1.3016877637130801, "grad_norm": 0.0537109375, "learning_rate": 2.3783006676291866e-05, "loss": 1.0212, "step": 78},
+ {"epoch": 1.3185654008438819, "grad_norm": 0.052734375, "learning_rate": 2.361313307179837e-05, "loss": 1.0371, "step": 79},
+ {"epoch": 1.3354430379746836, "grad_norm": 0.05126953125, "learning_rate": 2.3441595146543458e-05, "loss": 1.0314, "step": 80},
+ {"epoch": 1.3523206751054853, "grad_norm": 0.052001953125, "learning_rate": 2.3268426046901153e-05, "loss": 1.0195, "step": 81},
+ {"epoch": 1.369198312236287, "grad_norm": 0.052978515625, "learning_rate": 2.3093659234438266e-05, "loss": 1.0219, "step": 82},
+ {"epoch": 1.3860759493670887, "grad_norm": 0.054931640625, "learning_rate": 2.291732847944861e-05, "loss": 1.0293, "step": 83},
+ {"epoch": 1.4029535864978904, "grad_norm": 0.052734375, "learning_rate": 2.2739467854427512e-05, "loss": 0.9992, "step": 84},
+ {"epoch": 1.4198312236286919, "grad_norm": 0.05224609375, "learning_rate": 2.2560111727488e-05, "loss": 1.0254, "step": 85},
+ {"epoch": 1.4367088607594938, "grad_norm": 0.0546875, "learning_rate": 2.237929475571979e-05, "loss": 1.0148, "step": 86},
+ {"epoch": 1.4535864978902953, "grad_norm": 0.055419921875, "learning_rate": 2.219705187849254e-05, "loss": 1.0228, "step": 87},
+ {"epoch": 1.4704641350210972, "grad_norm": 0.05810546875, "learning_rate": 2.2013418310704422e-05, "loss": 1.021, "step": 88},
+ {"epoch": 1.4873417721518987, "grad_norm": 0.0537109375, "learning_rate": 2.1828429535977585e-05, "loss": 1.0352, "step": 89},
+ {"epoch": 1.5042194092827004, "grad_norm": 0.054931640625, "learning_rate": 2.1642121299801594e-05, "loss": 1.0105, "step": 90},
+ {"epoch": 1.5042194092827004, "eval_loss": 1.0232045650482178, "eval_runtime": 74.0589, "eval_samples_per_second": 13.908, "eval_steps_per_second": 13.908, "step": 90},
+ {"epoch": 1.521097046413502, "grad_norm": 0.054931640625, "learning_rate": 2.1454529602626336e-05, "loss": 1.0051, "step": 91},
+ {"epoch": 1.5379746835443038, "grad_norm": 0.0546875, "learning_rate": 2.126569069290562e-05, "loss": 1.023, "step": 92},
+ {"epoch": 1.5548523206751055, "grad_norm": 0.0546875, "learning_rate": 2.107564106009286e-05, "loss": 1.012, "step": 93},
+ {"epoch": 1.5717299578059072, "grad_norm": 0.0546875, "learning_rate": 2.0884417427590217e-05, "loss": 1.0136, "step": 94},
+ {"epoch": 1.5886075949367089, "grad_norm": 0.0546875, "learning_rate": 2.0692056745652483e-05, "loss": 1.0194, "step": 95},
+ {"epoch": 1.6054852320675106, "grad_norm": 0.0556640625, "learning_rate": 2.0498596184247196e-05, "loss": 1.0089, "step": 96},
+ {"epoch": 1.6223628691983123, "grad_norm": 0.054931640625, "learning_rate": 2.030407312587224e-05, "loss": 1.0226, "step": 97},
+ {"epoch": 1.6392405063291138, "grad_norm": 0.058349609375, "learning_rate": 2.010852515833242e-05, "loss": 1.0219, "step": 98},
+ {"epoch": 1.6561181434599157, "grad_norm": 0.0556640625, "learning_rate": 1.9911990067476336e-05, "loss": 1.0035, "step": 99},
+ {"epoch": 1.6729957805907172, "grad_norm": 0.055908203125, "learning_rate": 1.9714505829895004e-05, "loss": 1.0052, "step": 100},
+ {"epoch": 1.689873417721519, "grad_norm": 0.05712890625, "learning_rate": 1.951611060558363e-05, "loss": 1.0175, "step": 101},
+ {"epoch": 1.7067510548523206, "grad_norm": 0.056396484375, "learning_rate": 1.9316842730567902e-05, "loss": 1.0099, "step": 102},
+ {"epoch": 1.7236286919831225, "grad_norm": 0.05615234375, "learning_rate": 1.9116740709496334e-05, "loss": 0.998, "step": 103},
+ {"epoch": 1.740506329113924, "grad_norm": 0.05615234375, "learning_rate": 1.8915843208199967e-05, "loss": 0.996, "step": 104},
+ {"epoch": 1.7573839662447257, "grad_norm": 0.058837890625, "learning_rate": 1.8714189046220946e-05, "loss": 1.009, "step": 105},
+ {"epoch": 1.7742616033755274, "grad_norm": 0.059326171875, "learning_rate": 1.851181718931141e-05, "loss": 1.0127, "step": 106},
+ {"epoch": 1.7911392405063291, "grad_norm": 0.0556640625, "learning_rate": 1.830876674190411e-05, "loss": 1.0107, "step": 107},
+ {"epoch": 1.8080168776371308, "grad_norm": 0.06201171875, "learning_rate": 1.8105076939556238e-05, "loss": 1.0264, "step": 108},
+ {"epoch": 1.8248945147679325, "grad_norm": 0.056884765625, "learning_rate": 1.790078714136792e-05, "loss": 1.0068, "step": 109},
+ {"epoch": 1.8417721518987342, "grad_norm": 0.056640625, "learning_rate": 1.769593682237682e-05, "loss": 1.0094, "step": 110},
+ {"epoch": 1.8586497890295357, "grad_norm": 0.057861328125, "learning_rate": 1.7490565565930382e-05, "loss": 1.0135, "step": 111},
+ {"epoch": 1.8755274261603376, "grad_norm": 0.05859375, "learning_rate": 1.7284713056037074e-05, "loss": 0.993, "step": 112},
+ {"epoch": 1.8924050632911391, "grad_norm": 0.056884765625, "learning_rate": 1.7078419069698283e-05, "loss": 1.015, "step": 113},
+ {"epoch": 1.909282700421941, "grad_norm": 0.055908203125, "learning_rate": 1.687172346922213e-05, "loss": 1.0043, "step": 114},
+ {"epoch": 1.9261603375527425, "grad_norm": 0.05859375, "learning_rate": 1.6664666194520873e-05, "loss": 0.9959, "step": 115},
+ {"epoch": 1.9430379746835444, "grad_norm": 0.0625, "learning_rate": 1.645728725539329e-05, "loss": 1.0177, "step": 116},
+ {"epoch": 1.959915611814346, "grad_norm": 0.05810546875, "learning_rate": 1.6249626723793572e-05, "loss": 1.033, "step": 117},
+ {"epoch": 1.9767932489451476, "grad_norm": 0.059326171875, "learning_rate": 1.6041724726088187e-05, "loss": 1.0155, "step": 118},
+ {"epoch": 1.9936708860759493, "grad_norm": 0.060791015625, "learning_rate": 1.5833621435302247e-05, "loss": 1.0167, "step": 119},
+ {"epoch": 2.010548523206751, "grad_norm": 0.0595703125, "learning_rate": 1.5625357063356825e-05, "loss": 1.0082, "step": 120},
+ {"epoch": 2.010548523206751, "eval_loss": 1.0127062797546387, "eval_runtime": 74.0711, "eval_samples_per_second": 13.906, "eval_steps_per_second": 13.906, "step": 120},
+ {"epoch": 2.0147679324894514, "grad_norm": 0.1865234375, "learning_rate": 1.541697185329881e-05, "loss": 1.6592, "step": 121},
+ {"epoch": 2.0316455696202533, "grad_norm": 0.0595703125, "learning_rate": 1.5208506071524727e-05, "loss": 1.0041, "step": 122},
+ {"epoch": 2.048523206751055, "grad_norm": 0.059326171875, "learning_rate": 1.5e-05, "loss": 1.0116, "step": 123},
+ {"epoch": 2.0654008438818567, "grad_norm": 0.05859375, "learning_rate": 1.4791493928475275e-05, "loss": 1.0026, "step": 124},
+ {"epoch": 2.0822784810126582, "grad_norm": 0.05810546875, "learning_rate": 1.4583028146701191e-05, "loss": 1.0122, "step": 125},
+ {"epoch": 2.0991561181434597, "grad_norm": 0.06005859375, "learning_rate": 1.437464293664318e-05, "loss": 1.0024, "step": 126},
+ {"epoch": 2.1160337552742616, "grad_norm": 0.060546875, "learning_rate": 1.4166378564697757e-05, "loss": 1.0092, "step": 127},
+ {"epoch": 2.132911392405063, "grad_norm": 0.05908203125, "learning_rate": 1.3958275273911812e-05, "loss": 1.0048, "step": 128},
+ {"epoch": 2.149789029535865, "grad_norm": 0.058837890625, "learning_rate": 1.375037327620643e-05, "loss": 0.9921, "step": 129},
+ {"epoch": 2.1666666666666665, "grad_norm": 0.06005859375, "learning_rate": 1.3542712744606712e-05, "loss": 1.0117, "step": 130},
+ {"epoch": 2.1835443037974684, "grad_norm": 0.05859375, "learning_rate": 1.3335333805479126e-05, "loss": 0.9965, "step": 131},
+ {"epoch": 2.20042194092827, "grad_norm": 0.059814453125, "learning_rate": 1.3128276530777874e-05, "loss": 1.0108, "step": 132},
+ {"epoch": 2.217299578059072, "grad_norm": 0.060791015625, "learning_rate": 1.292158093030172e-05, "loss": 1.0029, "step": 133},
+ {"epoch": 2.2341772151898733, "grad_norm": 0.058349609375, "learning_rate": 1.2715286943962925e-05, "loss": 0.9958, "step": 134},
+ {"epoch": 2.2510548523206753, "grad_norm": 0.05908203125, "learning_rate": 1.2509434434069625e-05, "loss": 1.0054, "step": 135},
+ {"epoch": 2.2679324894514767, "grad_norm": 0.0576171875, "learning_rate": 1.2304063177623182e-05, "loss": 0.9837, "step": 136},
+ {"epoch": 2.2848101265822787, "grad_norm": 0.060302734375, "learning_rate": 1.2099212858632083e-05, "loss": 1.0014, "step": 137},
+ {"epoch": 2.30168776371308, "grad_norm": 0.0595703125, "learning_rate": 1.1894923060443763e-05, "loss": 0.9816, "step": 138},
+ {"epoch": 2.318565400843882, "grad_norm": 0.0595703125, "learning_rate": 1.169123325809589e-05, "loss": 1.0021, "step": 139},
+ {"epoch": 2.3354430379746836, "grad_norm": 0.05859375, "learning_rate": 1.1488182810688593e-05, "loss": 1.0005, "step": 140},
+ {"epoch": 2.352320675105485, "grad_norm": 0.06103515625, "learning_rate": 1.1285810953779057e-05, "loss": 1.0167, "step": 141},
+ {"epoch": 2.369198312236287, "grad_norm": 0.059814453125, "learning_rate": 1.1084156791800036e-05, "loss": 1.0146, "step": 142},
+ {"epoch": 2.3860759493670884, "grad_norm": 0.05859375, "learning_rate": 1.0883259290503665e-05, "loss": 1.0024, "step": 143},
+ {"epoch": 2.4029535864978904, "grad_norm": 0.0576171875, "learning_rate": 1.0683157269432097e-05, "loss": 0.9959, "step": 144},
+ {"epoch": 2.419831223628692, "grad_norm": 0.060546875, "learning_rate": 1.0483889394416373e-05, "loss": 1.0059, "step": 145},
+ {"epoch": 2.4367088607594938, "grad_norm": 0.06396484375, "learning_rate": 1.0285494170104996e-05, "loss": 0.983, "step": 146},
+ {"epoch": 2.4535864978902953, "grad_norm": 0.06005859375, "learning_rate": 1.0088009932523664e-05, "loss": 1.0116, "step": 147},
+ {"epoch": 2.470464135021097, "grad_norm": 0.061279296875, "learning_rate": 9.891474841667585e-06, "loss": 1.0136, "step": 148},
+ {"epoch": 2.4873417721518987, "grad_norm": 0.057861328125, "learning_rate": 9.695926874127765e-06, "loss": 0.9937, "step": 149},
+ {"epoch": 2.5042194092827, "grad_norm": 0.0595703125, "learning_rate": 9.501403815752813e-06, "loss": 0.9946, "step": 150},
+ {"epoch": 2.5042194092827, "eval_loss": 1.0074498653411865, "eval_runtime": 74.0482, "eval_samples_per_second": 13.91, "eval_steps_per_second": 13.91, "step": 150},
+ {"epoch": 2.521097046413502, "grad_norm": 0.060302734375, "learning_rate": 9.307943254347521e-06, "loss": 1.0043, "step": 151},
+ {"epoch": 2.537974683544304, "grad_norm": 0.06201171875, "learning_rate": 9.115582572409789e-06, "loss": 1.0125, "step": 152},
+ {"epoch": 2.5548523206751055, "grad_norm": 0.05859375, "learning_rate": 8.92435893990714e-06, "loss": 0.9753, "step": 153},
+ {"epoch": 2.571729957805907, "grad_norm": 0.060546875, "learning_rate": 8.734309307094382e-06, "loss": 0.9959, "step": 154},
+ {"epoch": 2.588607594936709, "grad_norm": 0.061279296875, "learning_rate": 8.545470397373665e-06, "loss": 1.0158, "step": 155},
+ {"epoch": 2.605485232067511, "grad_norm": 0.062255859375, "learning_rate": 8.357878700198407e-06, "loss": 1.0001, "step": 156},
+ {"epoch": 2.6223628691983123, "grad_norm": 0.060546875, "learning_rate": 8.171570464022419e-06, "loss": 0.9963, "step": 157},
+ {"epoch": 2.6392405063291138, "grad_norm": 0.0595703125, "learning_rate": 7.986581689295577e-06, "loss": 0.968, "step": 158},
+ {"epoch": 2.6561181434599157, "grad_norm": 0.059326171875, "learning_rate": 7.802948121507462e-06, "loss": 0.9872, "step": 159},
+ {"epoch": 2.672995780590717, "grad_norm": 0.0625, "learning_rate": 7.620705244280208e-06, "loss": 1.0181, "step": 160},
+ {"epoch": 2.689873417721519, "grad_norm": 0.06298828125, "learning_rate": 7.439888272512004e-06, "loss": 1.0057, "step": 161},
+ {"epoch": 2.7067510548523206, "grad_norm": 0.059814453125, "learning_rate": 7.260532145572487e-06, "loss": 0.9985, "step": 162},
+ {"epoch": 2.7236286919831225, "grad_norm": 0.059814453125, "learning_rate": 7.082671520551391e-06, "loss": 0.9973, "step": 163},
+ {"epoch": 2.740506329113924, "grad_norm": 0.06103515625, "learning_rate": 6.906340765561734e-06, "loss": 0.991, "step": 164},
+ {"epoch": 2.757383966244726, "grad_norm": 0.060546875, "learning_rate": 6.731573953098851e-06, "loss": 0.9979, "step": 165},
+ {"epoch": 2.7742616033755274, "grad_norm": 0.060546875, "learning_rate": 6.558404853456545e-06, "loss": 1.0096, "step": 166},
+ {"epoch": 2.791139240506329, "grad_norm": 0.060302734375, "learning_rate": 6.38686692820163e-06, "loss": 0.9981, "step": 167},
+ {"epoch": 2.808016877637131, "grad_norm": 0.0595703125, "learning_rate": 6.2169933237081386e-06, "loss": 1.0169, "step": 168},
+ {"epoch": 2.8248945147679327, "grad_norm": 0.0615234375, "learning_rate": 6.048816864752422e-06, "loss": 0.9934, "step": 169},
+ {"epoch": 2.8417721518987342, "grad_norm": 0.05859375, "learning_rate": 5.882370048170403e-06, "loss": 0.9913, "step": 170},
+ {"epoch": 2.8586497890295357, "grad_norm": 0.06005859375, "learning_rate": 5.71768503657819e-06, "loss": 1.0107, "step": 171},
+ {"epoch": 2.8755274261603376, "grad_norm": 0.060791015625, "learning_rate": 5.55479365215729e-06, "loss": 1.0098, "step": 172},
+ {"epoch": 2.892405063291139, "grad_norm": 0.0595703125, "learning_rate": 5.393727370505569e-06, "loss": 1.0123, "step": 173},
+ {"epoch": 2.909282700421941, "grad_norm": 0.060546875, "learning_rate": 5.234517314555213e-06, "loss": 0.9837, "step": 174},
+ {"epoch": 2.9261603375527425, "grad_norm": 0.060791015625, "learning_rate": 5.077194248558827e-06, "loss": 0.9865, "step": 175},
+ {"epoch": 2.9430379746835444, "grad_norm": 0.058349609375, "learning_rate": 4.921788572144841e-06, "loss": 0.995, "step": 176},
+ {"epoch": 2.959915611814346, "grad_norm": 0.06201171875, "learning_rate": 4.768330314443367e-06, "loss": 1.0009, "step": 177},
+ {"epoch": 2.976793248945148, "grad_norm": 0.060546875, "learning_rate": 4.616849128283658e-06, "loss": 1.0016, "step": 178},
+ {"epoch": 2.9936708860759493, "grad_norm": 0.0625, "learning_rate": 4.4673742844642716e-06, "loss": 0.9979, "step": 179},
+ {"epoch": 3.010548523206751, "grad_norm": 0.062255859375, "learning_rate": 4.319934666097055e-06, "loss": 0.9826, "step": 180},
+ {"epoch": 3.010548523206751, "eval_loss": 1.0056920051574707, "eval_runtime": 74.0404, "eval_samples_per_second": 13.911, "eval_steps_per_second": 13.911, "step": 180}
+ ],
+ "logging_steps": 1,
+ "max_steps": 236,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 30,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.0616521503801344e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
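
The log above is easiest to read programmatically: training loss falls from ~1.18 to ~0.98 over 180 of 236 steps, eval loss from 1.186 to 1.006, and the isolated loss spikes at steps 17, 37 and 121 (with matching grad-norm jumps) stand out immediately once the entries are tabulated. A short sketch that extracts the eval curve, assuming a local copy of the file:

```python
# Sketch: pull the eval-loss curve out of trainer_state.json.
import json

with open("checkpoint-180/trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]:>3}: eval_loss {entry["eval_loss"]:.4f}')
# step   1: eval_loss 1.1860 ... step 180: eval_loss 1.0057
```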
checkpoint-180/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47fe3d8f86dd99270fe973ee57cf8bc56524ac1c04eb16ea3572e34a069a173f
+ size 6072
checkpoint-180/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-210/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /workspace/data/models/Qwen2-7B
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-210/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/data/models/Qwen2-7B",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "down_proj",
+ "q_proj",
+ "gate_proj",
+ "v_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
checkpoint-210/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:211e075290261b180bf07444a698f876b0bf0d4f25a0f5d9a63c1a0817dd684a
+ size 161533584
checkpoint-210/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-210/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-210/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98e88a09d37b5d510640296d54ddd41c6450aa4ba2b6e23c8afe6658afd53b8e
+ size 323292010
checkpoint-210/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3acd095418f1f164bc481993af24e659f064b41527543ccbde827f79f691f6a2
+ size 14244
checkpoint-210/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3572e42c81044982b589162cb8d8c0f32fd7261e0a56bbd08c208bc4f8dd4f36
+ size 1064
checkpoint-210/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-210/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null,
+ "use_fast": true
+ }
checkpoint-210/trainer_state.json ADDED
@@ -0,0 +1,1567 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.50210970464135,
+ "eval_steps": 30,
+ "global_step": 210,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {"epoch": 0.016877637130801686, "grad_norm": 0.08447265625, "learning_rate": 3e-06, "loss": 1.1751, "step": 1},
+ {"epoch": 0.016877637130801686, "eval_loss": 1.185997486114502, "eval_runtime": 72.8223, "eval_samples_per_second": 14.144, "eval_steps_per_second": 14.144, "step": 1},
+ {"epoch": 0.03375527426160337, "grad_norm": 0.08544921875, "learning_rate": 6e-06, "loss": 1.1683, "step": 2},
+ {"epoch": 0.05063291139240506, "grad_norm": 0.0830078125, "learning_rate": 9e-06, "loss": 1.1737, "step": 3},
+ {"epoch": 0.06751054852320675, "grad_norm": 0.08544921875, "learning_rate": 1.2e-05, "loss": 1.1889, "step": 4},
+ {"epoch": 0.08438818565400844, "grad_norm": 0.0849609375, "learning_rate": 1.5e-05, "loss": 1.1619, "step": 5},
+ {"epoch": 0.10126582278481013, "grad_norm": 0.0869140625, "learning_rate": 1.8e-05, "loss": 1.1815, "step": 6},
+ {"epoch": 0.11814345991561181, "grad_norm": 0.08447265625, "learning_rate": 2.1e-05, "loss": 1.1726, "step": 7},
+ {"epoch": 0.1350210970464135, "grad_norm": 0.08740234375, "learning_rate": 2.4e-05, "loss": 1.1701, "step": 8},
+ {"epoch": 0.1518987341772152, "grad_norm": 0.08642578125, "learning_rate": 2.7000000000000002e-05, "loss": 1.1818, "step": 9},
+ {"epoch": 0.16877637130801687, "grad_norm": 0.08935546875, "learning_rate": 3e-05, "loss": 1.1896, "step": 10},
+ {"epoch": 0.18565400843881857, "grad_norm": 0.0888671875, "learning_rate": 2.999855077059572e-05, "loss": 1.1873, "step": 11},
+ {"epoch": 0.20253164556962025, "grad_norm": 0.08984375, "learning_rate": 2.9994203362418313e-05, "loss": 1.1838, "step": 12},
+ {"epoch": 0.21940928270042195, "grad_norm": 0.0859375, "learning_rate": 2.998695861552002e-05, "loss": 1.1569, "step": 13},
+ {"epoch": 0.23628691983122363, "grad_norm": 0.08642578125, "learning_rate": 2.9976817929807542e-05, "loss": 1.1595, "step": 14},
+ {"epoch": 0.25316455696202533, "grad_norm": 0.078125, "learning_rate": 2.996378326477153e-05, "loss": 1.1348, "step": 15},
+ {"epoch": 0.270042194092827, "grad_norm": 0.07470703125, "learning_rate": 2.9947857139107964e-05, "loss": 1.1434, "step": 16},
+ {"epoch": 0.2869198312236287, "grad_norm": 0.140625, "learning_rate": 2.992904263023146e-05, "loss": 1.8213, "step": 17},
+ {"epoch": 0.3037974683544304, "grad_norm": 0.06787109375, "learning_rate": 2.990734337368062e-05, "loss": 1.1289, "step": 18},
+ {"epoch": 0.3206751054852321, "grad_norm": 0.0654296875, "learning_rate": 2.9882763562415518e-05, "loss": 1.1106, "step": 19},
+ {"epoch": 0.33755274261603374, "grad_norm": 0.06640625, "learning_rate": 2.9855307946007532e-05, "loss": 1.1381, "step": 20},
+ {"epoch": 0.35443037974683544, "grad_norm": 0.064453125, "learning_rate": 2.982498182972154e-05, "loss": 1.1192, "step": 21},
+ {"epoch": 0.37130801687763715, "grad_norm": 0.0615234375, "learning_rate": 2.9791791073490795e-05, "loss": 1.1105, "step": 22},
+ {"epoch": 0.3881856540084388, "grad_norm": 0.061767578125, "learning_rate": 2.9755742090784617e-05, "loss": 1.1207, "step": 23},
+ {"epoch": 0.4050632911392405, "grad_norm": 0.0634765625, "learning_rate": 2.9716841847369106e-05, "loss": 1.1083, "step": 24},
+ {"epoch": 0.4219409282700422, "grad_norm": 0.058349609375, "learning_rate": 2.967509785996114e-05, "loss": 1.1007, "step": 25},
+ {"epoch": 0.4388185654008439, "grad_norm": 0.0615234375, "learning_rate": 2.963051819477592e-05, "loss": 1.0842, "step": 26},
+ {"epoch": 0.45569620253164556, "grad_norm": 0.0625, "learning_rate": 2.958311146596833e-05, "loss": 1.0961, "step": 27},
+ {"epoch": 0.47257383966244726, "grad_norm": 0.05908203125, "learning_rate": 2.953288683396841e-05, "loss": 1.109, "step": 28},
+ {"epoch": 0.48945147679324896, "grad_norm": 0.061767578125, "learning_rate": 2.9479854003711298e-05, "loss": 1.0789, "step": 29},
+ {"epoch": 0.5063291139240507, "grad_norm": 0.06103515625, "learning_rate": 2.9424023222761938e-05, "loss": 1.1007, "step": 30},
+ {"epoch": 0.5063291139240507, "eval_loss": 1.091182827949524, "eval_runtime": 74.0135, "eval_samples_per_second": 13.916, "eval_steps_per_second": 13.916, "step": 30},
+ {"epoch": 0.5232067510548524, "grad_norm": 0.059814453125, "learning_rate": 2.9365405279334904e-05, "loss": 1.0756, "step": 31},
+ {"epoch": 0.540084388185654, "grad_norm": 0.056396484375, "learning_rate": 2.930401150020983e-05, "loss": 1.0939, "step": 32},
+ {"epoch": 0.5569620253164557, "grad_norm": 0.05712890625, "learning_rate": 2.9239853748542717e-05, "loss": 1.0901, "step": 33},
+ {"epoch": 0.5738396624472574, "grad_norm": 0.054931640625, "learning_rate": 2.9172944421573587e-05, "loss": 1.0873, "step": 34},
+ {"epoch": 0.5907172995780591, "grad_norm": 0.0546875, "learning_rate": 2.9103296448230986e-05, "loss": 1.0584, "step": 35},
+ {"epoch": 0.6075949367088608, "grad_norm": 0.0556640625, "learning_rate": 2.9030923286633703e-05, "loss": 1.0692, "step": 36},
+ {"epoch": 0.6244725738396625, "grad_norm": 0.16015625, "learning_rate": 2.8955838921490252e-05, "loss": 1.782, "step": 37},
+ {"epoch": 0.6413502109704642, "grad_norm": 0.052001953125, "learning_rate": 2.8878057861396606e-05, "loss": 1.0667, "step": 38},
+ {"epoch": 0.6582278481012658, "grad_norm": 0.052001953125, "learning_rate": 2.8797595136032675e-05, "loss": 1.0656,
298
+ "step": 39
299
+ },
300
+ {
301
+ "epoch": 0.6751054852320675,
302
+ "grad_norm": 0.052734375,
303
+ "learning_rate": 2.8714466293258142e-05,
304
+ "loss": 1.0736,
305
+ "step": 40
306
+ },
307
+ {
308
+ "epoch": 0.6919831223628692,
309
+ "grad_norm": 0.053955078125,
310
+ "learning_rate": 2.8628687396108107e-05,
311
+ "loss": 1.0638,
312
+ "step": 41
313
+ },
314
+ {
315
+ "epoch": 0.7088607594936709,
316
+ "grad_norm": 0.05224609375,
317
+ "learning_rate": 2.8540275019689237e-05,
318
+ "loss": 1.0746,
319
+ "step": 42
320
+ },
321
+ {
322
+ "epoch": 0.7257383966244726,
323
+ "grad_norm": 0.051513671875,
324
+ "learning_rate": 2.8449246247976947e-05,
325
+ "loss": 1.0608,
326
+ "step": 43
327
+ },
328
+ {
329
+ "epoch": 0.7426160337552743,
330
+ "grad_norm": 0.052001953125,
331
+ "learning_rate": 2.835561867051426e-05,
332
+ "loss": 1.0619,
333
+ "step": 44
334
+ },
335
+ {
336
+ "epoch": 0.759493670886076,
337
+ "grad_norm": 0.051025390625,
338
+ "learning_rate": 2.825941037901294e-05,
339
+ "loss": 1.048,
340
+ "step": 45
341
+ },
342
+ {
343
+ "epoch": 0.7763713080168776,
344
+ "grad_norm": 0.05078125,
345
+ "learning_rate": 2.816063996385765e-05,
346
+ "loss": 1.0761,
347
+ "step": 46
348
+ },
349
+ {
350
+ "epoch": 0.7932489451476793,
351
+ "grad_norm": 0.0498046875,
352
+ "learning_rate": 2.805932651051372e-05,
353
+ "loss": 1.0443,
354
+ "step": 47
355
+ },
356
+ {
357
+ "epoch": 0.810126582278481,
358
+ "grad_norm": 0.051025390625,
359
+ "learning_rate": 2.7955489595839228e-05,
360
+ "loss": 1.0527,
361
+ "step": 48
362
+ },
363
+ {
364
+ "epoch": 0.8270042194092827,
365
+ "grad_norm": 0.052734375,
366
+ "learning_rate": 2.784914928430218e-05,
367
+ "loss": 1.0498,
368
+ "step": 49
369
+ },
370
+ {
371
+ "epoch": 0.8438818565400844,
372
+ "grad_norm": 0.049560546875,
373
+ "learning_rate": 2.7740326124103416e-05,
374
+ "loss": 1.0537,
375
+ "step": 50
376
+ },
377
+ {
378
+ "epoch": 0.8607594936708861,
379
+ "grad_norm": 0.053466796875,
380
+ "learning_rate": 2.762904114320609e-05,
381
+ "loss": 1.0326,
382
+ "step": 51
383
+ },
384
+ {
385
+ "epoch": 0.8776371308016878,
386
+ "grad_norm": 0.04931640625,
387
+ "learning_rate": 2.751531584527241e-05,
388
+ "loss": 1.043,
389
+ "step": 52
390
+ },
391
+ {
392
+ "epoch": 0.8945147679324894,
393
+ "grad_norm": 0.05126953125,
394
+ "learning_rate": 2.7399172205508476e-05,
395
+ "loss": 1.0463,
396
+ "step": 53
397
+ },
398
+ {
399
+ "epoch": 0.9113924050632911,
400
+ "grad_norm": 0.0517578125,
401
+ "learning_rate": 2.7280632666418013e-05,
402
+ "loss": 1.0476,
403
+ "step": 54
404
+ },
405
+ {
406
+ "epoch": 0.9282700421940928,
407
+ "grad_norm": 0.053466796875,
408
+ "learning_rate": 2.715972013346576e-05,
409
+ "loss": 1.0467,
410
+ "step": 55
411
+ },
412
+ {
413
+ "epoch": 0.9451476793248945,
414
+ "grad_norm": 0.0498046875,
415
+ "learning_rate": 2.703645797065147e-05,
416
+ "loss": 1.0467,
417
+ "step": 56
418
+ },
419
+ {
420
+ "epoch": 0.9620253164556962,
421
+ "grad_norm": 0.05078125,
422
+ "learning_rate": 2.6910869995995247e-05,
423
+ "loss": 1.05,
424
+ "step": 57
425
+ },
426
+ {
427
+ "epoch": 0.9789029535864979,
428
+ "grad_norm": 0.053955078125,
429
+ "learning_rate": 2.678298047693518e-05,
430
+ "loss": 1.0453,
431
+ "step": 58
432
+ },
433
+ {
434
+ "epoch": 0.9957805907172996,
435
+ "grad_norm": 0.050048828125,
436
+ "learning_rate": 2.6652814125638142e-05,
437
+ "loss": 1.0348,
438
+ "step": 59
439
+ },
440
+ {
441
+ "epoch": 1.0126582278481013,
442
+ "grad_norm": 0.052490234375,
443
+ "learning_rate": 2.652039609422463e-05,
444
+ "loss": 1.0418,
445
+ "step": 60
446
+ },
447
+ {
448
+ "epoch": 1.0126582278481013,
449
+ "eval_loss": 1.0428293943405151,
450
+ "eval_runtime": 74.0734,
451
+ "eval_samples_per_second": 13.905,
452
+ "eval_steps_per_second": 13.905,
453
+ "step": 60
454
+ },
455
+ {
456
+ "epoch": 1.0147679324894514,
457
+ "grad_norm": 0.0732421875,
458
+ "learning_rate": 2.638575196990862e-05,
459
+ "loss": 1.0194,
460
+ "step": 61
461
+ },
462
+ {
463
+ "epoch": 1.0316455696202531,
464
+ "grad_norm": 0.049560546875,
465
+ "learning_rate": 2.624890777005332e-05,
466
+ "loss": 1.0365,
467
+ "step": 62
468
+ },
469
+ {
470
+ "epoch": 1.0485232067510548,
471
+ "grad_norm": 0.05126953125,
472
+ "learning_rate": 2.6109889937143828e-05,
473
+ "loss": 1.0426,
474
+ "step": 63
475
+ },
476
+ {
477
+ "epoch": 1.0654008438818565,
478
+ "grad_norm": 0.05126953125,
479
+ "learning_rate": 2.5968725333677628e-05,
480
+ "loss": 1.043,
481
+ "step": 64
482
+ },
483
+ {
484
+ "epoch": 1.0822784810126582,
485
+ "grad_norm": 0.051513671875,
486
+ "learning_rate": 2.582544123697395e-05,
487
+ "loss": 1.0243,
488
+ "step": 65
489
+ },
490
+ {
491
+ "epoch": 1.09915611814346,
492
+ "grad_norm": 0.0517578125,
493
+ "learning_rate": 2.568006533390295e-05,
494
+ "loss": 1.0258,
495
+ "step": 66
496
+ },
497
+ {
498
+ "epoch": 1.1160337552742616,
499
+ "grad_norm": 0.050537109375,
500
+ "learning_rate": 2.5532625715535733e-05,
501
+ "loss": 1.0248,
502
+ "step": 67
503
+ },
504
+ {
505
+ "epoch": 1.1329113924050633,
506
+ "grad_norm": 0.0498046875,
507
+ "learning_rate": 2.5383150871716342e-05,
508
+ "loss": 1.0083,
509
+ "step": 68
510
+ },
511
+ {
512
+ "epoch": 1.149789029535865,
513
+ "grad_norm": 0.05224609375,
514
+ "learning_rate": 2.5231669685556636e-05,
515
+ "loss": 1.0207,
516
+ "step": 69
517
+ },
518
+ {
519
+ "epoch": 1.1666666666666667,
520
+ "grad_norm": 0.051513671875,
521
+ "learning_rate": 2.507821142785516e-05,
522
+ "loss": 1.0435,
523
+ "step": 70
524
+ },
525
+ {
526
+ "epoch": 1.1835443037974684,
527
+ "grad_norm": 0.05224609375,
528
+ "learning_rate": 2.4922805751441174e-05,
529
+ "loss": 1.0354,
530
+ "step": 71
531
+ },
532
+ {
533
+ "epoch": 1.20042194092827,
534
+ "grad_norm": 0.054443359375,
535
+ "learning_rate": 2.4765482685444786e-05,
536
+ "loss": 1.0266,
537
+ "step": 72
538
+ },
539
+ {
540
+ "epoch": 1.2172995780590719,
541
+ "grad_norm": 0.05126953125,
542
+ "learning_rate": 2.460627262949443e-05,
543
+ "loss": 1.0411,
544
+ "step": 73
545
+ },
546
+ {
547
+ "epoch": 1.2341772151898733,
548
+ "grad_norm": 0.05126953125,
549
+ "learning_rate": 2.4445206347842714e-05,
550
+ "loss": 1.0224,
551
+ "step": 74
552
+ },
553
+ {
554
+ "epoch": 1.251054852320675,
555
+ "grad_norm": 0.052978515625,
556
+ "learning_rate": 2.428231496342181e-05,
557
+ "loss": 1.0253,
558
+ "step": 75
559
+ },
560
+ {
561
+ "epoch": 1.2679324894514767,
562
+ "grad_norm": 0.05322265625,
563
+ "learning_rate": 2.4117629951829602e-05,
564
+ "loss": 1.0298,
565
+ "step": 76
566
+ },
567
+ {
568
+ "epoch": 1.2848101265822784,
569
+ "grad_norm": 0.052490234375,
570
+ "learning_rate": 2.395118313524758e-05,
571
+ "loss": 1.0239,
572
+ "step": 77
573
+ },
574
+ {
575
+ "epoch": 1.3016877637130801,
576
+ "grad_norm": 0.0537109375,
577
+ "learning_rate": 2.3783006676291866e-05,
578
+ "loss": 1.0212,
579
+ "step": 78
580
+ },
581
+ {
582
+ "epoch": 1.3185654008438819,
583
+ "grad_norm": 0.052734375,
584
+ "learning_rate": 2.361313307179837e-05,
585
+ "loss": 1.0371,
586
+ "step": 79
587
+ },
588
+ {
589
+ "epoch": 1.3354430379746836,
590
+ "grad_norm": 0.05126953125,
591
+ "learning_rate": 2.3441595146543458e-05,
592
+ "loss": 1.0314,
593
+ "step": 80
594
+ },
595
+ {
596
+ "epoch": 1.3523206751054853,
597
+ "grad_norm": 0.052001953125,
598
+ "learning_rate": 2.3268426046901153e-05,
599
+ "loss": 1.0195,
600
+ "step": 81
601
+ },
602
+ {
603
+ "epoch": 1.369198312236287,
604
+ "grad_norm": 0.052978515625,
605
+ "learning_rate": 2.3093659234438266e-05,
606
+ "loss": 1.0219,
607
+ "step": 82
608
+ },
609
+ {
610
+ "epoch": 1.3860759493670887,
611
+ "grad_norm": 0.054931640625,
612
+ "learning_rate": 2.291732847944861e-05,
613
+ "loss": 1.0293,
614
+ "step": 83
615
+ },
616
+ {
617
+ "epoch": 1.4029535864978904,
618
+ "grad_norm": 0.052734375,
619
+ "learning_rate": 2.2739467854427512e-05,
620
+ "loss": 0.9992,
621
+ "step": 84
622
+ },
623
+ {
624
+ "epoch": 1.4198312236286919,
625
+ "grad_norm": 0.05224609375,
626
+ "learning_rate": 2.2560111727488e-05,
627
+ "loss": 1.0254,
628
+ "step": 85
629
+ },
630
+ {
631
+ "epoch": 1.4367088607594938,
632
+ "grad_norm": 0.0546875,
633
+ "learning_rate": 2.237929475571979e-05,
634
+ "loss": 1.0148,
635
+ "step": 86
636
+ },
637
+ {
638
+ "epoch": 1.4535864978902953,
639
+ "grad_norm": 0.055419921875,
640
+ "learning_rate": 2.219705187849254e-05,
641
+ "loss": 1.0228,
642
+ "step": 87
643
+ },
644
+ {
645
+ "epoch": 1.4704641350210972,
646
+ "grad_norm": 0.05810546875,
647
+ "learning_rate": 2.2013418310704422e-05,
648
+ "loss": 1.021,
649
+ "step": 88
650
+ },
651
+ {
652
+ "epoch": 1.4873417721518987,
653
+ "grad_norm": 0.0537109375,
654
+ "learning_rate": 2.1828429535977585e-05,
655
+ "loss": 1.0352,
656
+ "step": 89
657
+ },
658
+ {
659
+ "epoch": 1.5042194092827004,
660
+ "grad_norm": 0.054931640625,
661
+ "learning_rate": 2.1642121299801594e-05,
662
+ "loss": 1.0105,
663
+ "step": 90
664
+ },
665
+ {
666
+ "epoch": 1.5042194092827004,
667
+ "eval_loss": 1.0232045650482178,
668
+ "eval_runtime": 74.0589,
669
+ "eval_samples_per_second": 13.908,
670
+ "eval_steps_per_second": 13.908,
671
+ "step": 90
672
+ },
673
+ {
674
+ "epoch": 1.521097046413502,
675
+ "grad_norm": 0.054931640625,
676
+ "learning_rate": 2.1454529602626336e-05,
677
+ "loss": 1.0051,
678
+ "step": 91
679
+ },
680
+ {
681
+ "epoch": 1.5379746835443038,
682
+ "grad_norm": 0.0546875,
683
+ "learning_rate": 2.126569069290562e-05,
684
+ "loss": 1.023,
685
+ "step": 92
686
+ },
687
+ {
688
+ "epoch": 1.5548523206751055,
689
+ "grad_norm": 0.0546875,
690
+ "learning_rate": 2.107564106009286e-05,
691
+ "loss": 1.012,
692
+ "step": 93
693
+ },
694
+ {
695
+ "epoch": 1.5717299578059072,
696
+ "grad_norm": 0.0546875,
697
+ "learning_rate": 2.0884417427590217e-05,
698
+ "loss": 1.0136,
699
+ "step": 94
700
+ },
701
+ {
702
+ "epoch": 1.5886075949367089,
703
+ "grad_norm": 0.0546875,
704
+ "learning_rate": 2.0692056745652483e-05,
705
+ "loss": 1.0194,
706
+ "step": 95
707
+ },
708
+ {
709
+ "epoch": 1.6054852320675106,
710
+ "grad_norm": 0.0556640625,
711
+ "learning_rate": 2.0498596184247196e-05,
712
+ "loss": 1.0089,
713
+ "step": 96
714
+ },
715
+ {
716
+ "epoch": 1.6223628691983123,
717
+ "grad_norm": 0.054931640625,
718
+ "learning_rate": 2.030407312587224e-05,
719
+ "loss": 1.0226,
720
+ "step": 97
721
+ },
722
+ {
723
+ "epoch": 1.6392405063291138,
724
+ "grad_norm": 0.058349609375,
725
+ "learning_rate": 2.010852515833242e-05,
726
+ "loss": 1.0219,
727
+ "step": 98
728
+ },
729
+ {
730
+ "epoch": 1.6561181434599157,
731
+ "grad_norm": 0.0556640625,
732
+ "learning_rate": 1.9911990067476336e-05,
733
+ "loss": 1.0035,
734
+ "step": 99
735
+ },
736
+ {
737
+ "epoch": 1.6729957805907172,
738
+ "grad_norm": 0.055908203125,
739
+ "learning_rate": 1.9714505829895004e-05,
740
+ "loss": 1.0052,
741
+ "step": 100
742
+ },
743
+ {
744
+ "epoch": 1.689873417721519,
745
+ "grad_norm": 0.05712890625,
746
+ "learning_rate": 1.951611060558363e-05,
747
+ "loss": 1.0175,
748
+ "step": 101
749
+ },
750
+ {
751
+ "epoch": 1.7067510548523206,
752
+ "grad_norm": 0.056396484375,
753
+ "learning_rate": 1.9316842730567902e-05,
754
+ "loss": 1.0099,
755
+ "step": 102
756
+ },
757
+ {
758
+ "epoch": 1.7236286919831225,
759
+ "grad_norm": 0.05615234375,
760
+ "learning_rate": 1.9116740709496334e-05,
761
+ "loss": 0.998,
762
+ "step": 103
763
+ },
764
+ {
765
+ "epoch": 1.740506329113924,
766
+ "grad_norm": 0.05615234375,
767
+ "learning_rate": 1.8915843208199967e-05,
768
+ "loss": 0.996,
769
+ "step": 104
770
+ },
771
+ {
772
+ "epoch": 1.7573839662447257,
773
+ "grad_norm": 0.058837890625,
774
+ "learning_rate": 1.8714189046220946e-05,
775
+ "loss": 1.009,
776
+ "step": 105
777
+ },
778
+ {
779
+ "epoch": 1.7742616033755274,
780
+ "grad_norm": 0.059326171875,
781
+ "learning_rate": 1.851181718931141e-05,
782
+ "loss": 1.0127,
783
+ "step": 106
784
+ },
785
+ {
786
+ "epoch": 1.7911392405063291,
787
+ "grad_norm": 0.0556640625,
788
+ "learning_rate": 1.830876674190411e-05,
789
+ "loss": 1.0107,
790
+ "step": 107
791
+ },
792
+ {
793
+ "epoch": 1.8080168776371308,
794
+ "grad_norm": 0.06201171875,
795
+ "learning_rate": 1.8105076939556238e-05,
796
+ "loss": 1.0264,
797
+ "step": 108
798
+ },
799
+ {
800
+ "epoch": 1.8248945147679325,
801
+ "grad_norm": 0.056884765625,
802
+ "learning_rate": 1.790078714136792e-05,
803
+ "loss": 1.0068,
804
+ "step": 109
805
+ },
806
+ {
807
+ "epoch": 1.8417721518987342,
808
+ "grad_norm": 0.056640625,
809
+ "learning_rate": 1.769593682237682e-05,
810
+ "loss": 1.0094,
811
+ "step": 110
812
+ },
813
+ {
814
+ "epoch": 1.8586497890295357,
815
+ "grad_norm": 0.057861328125,
816
+ "learning_rate": 1.7490565565930382e-05,
817
+ "loss": 1.0135,
818
+ "step": 111
819
+ },
820
+ {
821
+ "epoch": 1.8755274261603376,
822
+ "grad_norm": 0.05859375,
823
+ "learning_rate": 1.7284713056037074e-05,
824
+ "loss": 0.993,
825
+ "step": 112
826
+ },
827
+ {
828
+ "epoch": 1.8924050632911391,
829
+ "grad_norm": 0.056884765625,
830
+ "learning_rate": 1.7078419069698283e-05,
831
+ "loss": 1.015,
832
+ "step": 113
833
+ },
834
+ {
835
+ "epoch": 1.909282700421941,
836
+ "grad_norm": 0.055908203125,
837
+ "learning_rate": 1.687172346922213e-05,
838
+ "loss": 1.0043,
839
+ "step": 114
840
+ },
841
+ {
842
+ "epoch": 1.9261603375527425,
843
+ "grad_norm": 0.05859375,
844
+ "learning_rate": 1.6664666194520873e-05,
845
+ "loss": 0.9959,
846
+ "step": 115
847
+ },
848
+ {
849
+ "epoch": 1.9430379746835444,
850
+ "grad_norm": 0.0625,
851
+ "learning_rate": 1.645728725539329e-05,
852
+ "loss": 1.0177,
853
+ "step": 116
854
+ },
855
+ {
856
+ "epoch": 1.959915611814346,
857
+ "grad_norm": 0.05810546875,
858
+ "learning_rate": 1.6249626723793572e-05,
859
+ "loss": 1.033,
860
+ "step": 117
861
+ },
862
+ {
863
+ "epoch": 1.9767932489451476,
864
+ "grad_norm": 0.059326171875,
865
+ "learning_rate": 1.6041724726088187e-05,
866
+ "loss": 1.0155,
867
+ "step": 118
868
+ },
869
+ {
870
+ "epoch": 1.9936708860759493,
871
+ "grad_norm": 0.060791015625,
872
+ "learning_rate": 1.5833621435302247e-05,
873
+ "loss": 1.0167,
874
+ "step": 119
875
+ },
876
+ {
877
+ "epoch": 2.010548523206751,
878
+ "grad_norm": 0.0595703125,
879
+ "learning_rate": 1.5625357063356825e-05,
880
+ "loss": 1.0082,
881
+ "step": 120
882
+ },
883
+ {
884
+ "epoch": 2.010548523206751,
885
+ "eval_loss": 1.0127062797546387,
886
+ "eval_runtime": 74.0711,
887
+ "eval_samples_per_second": 13.906,
888
+ "eval_steps_per_second": 13.906,
889
+ "step": 120
890
+ },
891
+ {
892
+ "epoch": 2.0147679324894514,
893
+ "grad_norm": 0.1865234375,
894
+ "learning_rate": 1.541697185329881e-05,
895
+ "loss": 1.6592,
896
+ "step": 121
897
+ },
898
+ {
899
+ "epoch": 2.0316455696202533,
900
+ "grad_norm": 0.0595703125,
901
+ "learning_rate": 1.5208506071524727e-05,
902
+ "loss": 1.0041,
903
+ "step": 122
904
+ },
905
+ {
906
+ "epoch": 2.048523206751055,
907
+ "grad_norm": 0.059326171875,
908
+ "learning_rate": 1.5e-05,
909
+ "loss": 1.0116,
910
+ "step": 123
911
+ },
912
+ {
913
+ "epoch": 2.0654008438818567,
914
+ "grad_norm": 0.05859375,
915
+ "learning_rate": 1.4791493928475275e-05,
916
+ "loss": 1.0026,
917
+ "step": 124
918
+ },
919
+ {
920
+ "epoch": 2.0822784810126582,
921
+ "grad_norm": 0.05810546875,
922
+ "learning_rate": 1.4583028146701191e-05,
923
+ "loss": 1.0122,
924
+ "step": 125
925
+ },
926
+ {
927
+ "epoch": 2.0991561181434597,
928
+ "grad_norm": 0.06005859375,
929
+ "learning_rate": 1.437464293664318e-05,
930
+ "loss": 1.0024,
931
+ "step": 126
932
+ },
933
+ {
934
+ "epoch": 2.1160337552742616,
935
+ "grad_norm": 0.060546875,
936
+ "learning_rate": 1.4166378564697757e-05,
937
+ "loss": 1.0092,
938
+ "step": 127
939
+ },
940
+ {
941
+ "epoch": 2.132911392405063,
942
+ "grad_norm": 0.05908203125,
943
+ "learning_rate": 1.3958275273911812e-05,
944
+ "loss": 1.0048,
945
+ "step": 128
946
+ },
947
+ {
948
+ "epoch": 2.149789029535865,
949
+ "grad_norm": 0.058837890625,
950
+ "learning_rate": 1.375037327620643e-05,
951
+ "loss": 0.9921,
952
+ "step": 129
953
+ },
954
+ {
955
+ "epoch": 2.1666666666666665,
956
+ "grad_norm": 0.06005859375,
957
+ "learning_rate": 1.3542712744606712e-05,
958
+ "loss": 1.0117,
959
+ "step": 130
960
+ },
961
+ {
962
+ "epoch": 2.1835443037974684,
963
+ "grad_norm": 0.05859375,
964
+ "learning_rate": 1.3335333805479126e-05,
965
+ "loss": 0.9965,
966
+ "step": 131
967
+ },
968
+ {
969
+ "epoch": 2.20042194092827,
970
+ "grad_norm": 0.059814453125,
971
+ "learning_rate": 1.3128276530777874e-05,
972
+ "loss": 1.0108,
973
+ "step": 132
974
+ },
975
+ {
976
+ "epoch": 2.217299578059072,
977
+ "grad_norm": 0.060791015625,
978
+ "learning_rate": 1.292158093030172e-05,
979
+ "loss": 1.0029,
980
+ "step": 133
981
+ },
982
+ {
983
+ "epoch": 2.2341772151898733,
984
+ "grad_norm": 0.058349609375,
985
+ "learning_rate": 1.2715286943962925e-05,
986
+ "loss": 0.9958,
987
+ "step": 134
988
+ },
989
+ {
990
+ "epoch": 2.2510548523206753,
991
+ "grad_norm": 0.05908203125,
992
+ "learning_rate": 1.2509434434069625e-05,
993
+ "loss": 1.0054,
994
+ "step": 135
995
+ },
996
+ {
997
+ "epoch": 2.2679324894514767,
998
+ "grad_norm": 0.0576171875,
999
+ "learning_rate": 1.2304063177623182e-05,
1000
+ "loss": 0.9837,
1001
+ "step": 136
1002
+ },
1003
+ {
1004
+ "epoch": 2.2848101265822787,
1005
+ "grad_norm": 0.060302734375,
1006
+ "learning_rate": 1.2099212858632083e-05,
1007
+ "loss": 1.0014,
1008
+ "step": 137
1009
+ },
1010
+ {
1011
+ "epoch": 2.30168776371308,
1012
+ "grad_norm": 0.0595703125,
1013
+ "learning_rate": 1.1894923060443763e-05,
1014
+ "loss": 0.9816,
1015
+ "step": 138
1016
+ },
1017
+ {
1018
+ "epoch": 2.318565400843882,
1019
+ "grad_norm": 0.0595703125,
1020
+ "learning_rate": 1.169123325809589e-05,
1021
+ "loss": 1.0021,
1022
+ "step": 139
1023
+ },
1024
+ {
1025
+ "epoch": 2.3354430379746836,
1026
+ "grad_norm": 0.05859375,
1027
+ "learning_rate": 1.1488182810688593e-05,
1028
+ "loss": 1.0005,
1029
+ "step": 140
1030
+ },
1031
+ {
1032
+ "epoch": 2.352320675105485,
1033
+ "grad_norm": 0.06103515625,
1034
+ "learning_rate": 1.1285810953779057e-05,
1035
+ "loss": 1.0167,
1036
+ "step": 141
1037
+ },
1038
+ {
1039
+ "epoch": 2.369198312236287,
1040
+ "grad_norm": 0.059814453125,
1041
+ "learning_rate": 1.1084156791800036e-05,
1042
+ "loss": 1.0146,
1043
+ "step": 142
1044
+ },
1045
+ {
1046
+ "epoch": 2.3860759493670884,
1047
+ "grad_norm": 0.05859375,
1048
+ "learning_rate": 1.0883259290503665e-05,
1049
+ "loss": 1.0024,
1050
+ "step": 143
1051
+ },
1052
+ {
1053
+ "epoch": 2.4029535864978904,
1054
+ "grad_norm": 0.0576171875,
1055
+ "learning_rate": 1.0683157269432097e-05,
1056
+ "loss": 0.9959,
1057
+ "step": 144
1058
+ },
1059
+ {
1060
+ "epoch": 2.419831223628692,
1061
+ "grad_norm": 0.060546875,
1062
+ "learning_rate": 1.0483889394416373e-05,
1063
+ "loss": 1.0059,
1064
+ "step": 145
1065
+ },
1066
+ {
1067
+ "epoch": 2.4367088607594938,
1068
+ "grad_norm": 0.06396484375,
1069
+ "learning_rate": 1.0285494170104996e-05,
1070
+ "loss": 0.983,
1071
+ "step": 146
1072
+ },
1073
+ {
1074
+ "epoch": 2.4535864978902953,
1075
+ "grad_norm": 0.06005859375,
1076
+ "learning_rate": 1.0088009932523664e-05,
1077
+ "loss": 1.0116,
1078
+ "step": 147
1079
+ },
1080
+ {
1081
+ "epoch": 2.470464135021097,
1082
+ "grad_norm": 0.061279296875,
1083
+ "learning_rate": 9.891474841667585e-06,
1084
+ "loss": 1.0136,
1085
+ "step": 148
1086
+ },
1087
+ {
1088
+ "epoch": 2.4873417721518987,
1089
+ "grad_norm": 0.057861328125,
1090
+ "learning_rate": 9.695926874127765e-06,
1091
+ "loss": 0.9937,
1092
+ "step": 149
1093
+ },
1094
+ {
1095
+ "epoch": 2.5042194092827,
1096
+ "grad_norm": 0.0595703125,
1097
+ "learning_rate": 9.501403815752813e-06,
1098
+ "loss": 0.9946,
1099
+ "step": 150
1100
+ },
1101
+ {
1102
+ "epoch": 2.5042194092827,
1103
+ "eval_loss": 1.0074498653411865,
1104
+ "eval_runtime": 74.0482,
1105
+ "eval_samples_per_second": 13.91,
1106
+ "eval_steps_per_second": 13.91,
1107
+ "step": 150
1108
+ },
1109
+ {
1110
+ "epoch": 2.521097046413502,
1111
+ "grad_norm": 0.060302734375,
1112
+ "learning_rate": 9.307943254347521e-06,
1113
+ "loss": 1.0043,
1114
+ "step": 151
1115
+ },
1116
+ {
1117
+ "epoch": 2.537974683544304,
1118
+ "grad_norm": 0.06201171875,
1119
+ "learning_rate": 9.115582572409789e-06,
1120
+ "loss": 1.0125,
1121
+ "step": 152
1122
+ },
1123
+ {
1124
+ "epoch": 2.5548523206751055,
1125
+ "grad_norm": 0.05859375,
1126
+ "learning_rate": 8.92435893990714e-06,
1127
+ "loss": 0.9753,
1128
+ "step": 153
1129
+ },
1130
+ {
1131
+ "epoch": 2.571729957805907,
1132
+ "grad_norm": 0.060546875,
1133
+ "learning_rate": 8.734309307094382e-06,
1134
+ "loss": 0.9959,
1135
+ "step": 154
1136
+ },
1137
+ {
1138
+ "epoch": 2.588607594936709,
1139
+ "grad_norm": 0.061279296875,
1140
+ "learning_rate": 8.545470397373665e-06,
1141
+ "loss": 1.0158,
1142
+ "step": 155
1143
+ },
1144
+ {
1145
+ "epoch": 2.605485232067511,
1146
+ "grad_norm": 0.062255859375,
1147
+ "learning_rate": 8.357878700198407e-06,
1148
+ "loss": 1.0001,
1149
+ "step": 156
1150
+ },
1151
+ {
1152
+ "epoch": 2.6223628691983123,
1153
+ "grad_norm": 0.060546875,
1154
+ "learning_rate": 8.171570464022419e-06,
1155
+ "loss": 0.9963,
1156
+ "step": 157
1157
+ },
1158
+ {
1159
+ "epoch": 2.6392405063291138,
1160
+ "grad_norm": 0.0595703125,
1161
+ "learning_rate": 7.986581689295577e-06,
1162
+ "loss": 0.968,
1163
+ "step": 158
1164
+ },
1165
+ {
1166
+ "epoch": 2.6561181434599157,
1167
+ "grad_norm": 0.059326171875,
1168
+ "learning_rate": 7.802948121507462e-06,
1169
+ "loss": 0.9872,
1170
+ "step": 159
1171
+ },
1172
+ {
1173
+ "epoch": 2.672995780590717,
1174
+ "grad_norm": 0.0625,
1175
+ "learning_rate": 7.620705244280208e-06,
1176
+ "loss": 1.0181,
1177
+ "step": 160
1178
+ },
1179
+ {
1180
+ "epoch": 2.689873417721519,
1181
+ "grad_norm": 0.06298828125,
1182
+ "learning_rate": 7.439888272512004e-06,
1183
+ "loss": 1.0057,
1184
+ "step": 161
1185
+ },
1186
+ {
1187
+ "epoch": 2.7067510548523206,
1188
+ "grad_norm": 0.059814453125,
1189
+ "learning_rate": 7.260532145572487e-06,
1190
+ "loss": 0.9985,
1191
+ "step": 162
1192
+ },
1193
+ {
1194
+ "epoch": 2.7236286919831225,
1195
+ "grad_norm": 0.059814453125,
1196
+ "learning_rate": 7.082671520551391e-06,
1197
+ "loss": 0.9973,
1198
+ "step": 163
1199
+ },
1200
+ {
1201
+ "epoch": 2.740506329113924,
1202
+ "grad_norm": 0.06103515625,
1203
+ "learning_rate": 6.906340765561734e-06,
1204
+ "loss": 0.991,
1205
+ "step": 164
1206
+ },
1207
+ {
1208
+ "epoch": 2.757383966244726,
1209
+ "grad_norm": 0.060546875,
1210
+ "learning_rate": 6.731573953098851e-06,
1211
+ "loss": 0.9979,
1212
+ "step": 165
1213
+ },
1214
+ {
1215
+ "epoch": 2.7742616033755274,
1216
+ "grad_norm": 0.060546875,
1217
+ "learning_rate": 6.558404853456545e-06,
1218
+ "loss": 1.0096,
1219
+ "step": 166
1220
+ },
1221
+ {
1222
+ "epoch": 2.791139240506329,
1223
+ "grad_norm": 0.060302734375,
1224
+ "learning_rate": 6.38686692820163e-06,
1225
+ "loss": 0.9981,
1226
+ "step": 167
1227
+ },
1228
+ {
1229
+ "epoch": 2.808016877637131,
1230
+ "grad_norm": 0.0595703125,
1231
+ "learning_rate": 6.2169933237081386e-06,
1232
+ "loss": 1.0169,
1233
+ "step": 168
1234
+ },
1235
+ {
1236
+ "epoch": 2.8248945147679327,
1237
+ "grad_norm": 0.0615234375,
1238
+ "learning_rate": 6.048816864752422e-06,
1239
+ "loss": 0.9934,
1240
+ "step": 169
1241
+ },
1242
+ {
1243
+ "epoch": 2.8417721518987342,
1244
+ "grad_norm": 0.05859375,
1245
+ "learning_rate": 5.882370048170403e-06,
1246
+ "loss": 0.9913,
1247
+ "step": 170
1248
+ },
1249
+ {
1250
+ "epoch": 2.8586497890295357,
1251
+ "grad_norm": 0.06005859375,
1252
+ "learning_rate": 5.71768503657819e-06,
1253
+ "loss": 1.0107,
1254
+ "step": 171
1255
+ },
1256
+ {
1257
+ "epoch": 2.8755274261603376,
1258
+ "grad_norm": 0.060791015625,
1259
+ "learning_rate": 5.55479365215729e-06,
1260
+ "loss": 1.0098,
1261
+ "step": 172
1262
+ },
1263
+ {
1264
+ "epoch": 2.892405063291139,
1265
+ "grad_norm": 0.0595703125,
1266
+ "learning_rate": 5.393727370505569e-06,
1267
+ "loss": 1.0123,
1268
+ "step": 173
1269
+ },
1270
+ {
1271
+ "epoch": 2.909282700421941,
1272
+ "grad_norm": 0.060546875,
1273
+ "learning_rate": 5.234517314555213e-06,
1274
+ "loss": 0.9837,
1275
+ "step": 174
1276
+ },
1277
+ {
1278
+ "epoch": 2.9261603375527425,
1279
+ "grad_norm": 0.060791015625,
1280
+ "learning_rate": 5.077194248558827e-06,
1281
+ "loss": 0.9865,
1282
+ "step": 175
1283
+ },
1284
+ {
1285
+ "epoch": 2.9430379746835444,
1286
+ "grad_norm": 0.058349609375,
1287
+ "learning_rate": 4.921788572144841e-06,
1288
+ "loss": 0.995,
1289
+ "step": 176
1290
+ },
1291
+ {
1292
+ "epoch": 2.959915611814346,
1293
+ "grad_norm": 0.06201171875,
1294
+ "learning_rate": 4.768330314443367e-06,
1295
+ "loss": 1.0009,
1296
+ "step": 177
1297
+ },
1298
+ {
1299
+ "epoch": 2.976793248945148,
1300
+ "grad_norm": 0.060546875,
1301
+ "learning_rate": 4.616849128283658e-06,
1302
+ "loss": 1.0016,
1303
+ "step": 178
1304
+ },
1305
+ {
1306
+ "epoch": 2.9936708860759493,
1307
+ "grad_norm": 0.0625,
1308
+ "learning_rate": 4.4673742844642716e-06,
1309
+ "loss": 0.9979,
1310
+ "step": 179
1311
+ },
1312
+ {
1313
+ "epoch": 3.010548523206751,
1314
+ "grad_norm": 0.062255859375,
1315
+ "learning_rate": 4.319934666097055e-06,
1316
+ "loss": 0.9826,
1317
+ "step": 180
1318
+ },
1319
+ {
1320
+ "epoch": 3.010548523206751,
1321
+ "eval_loss": 1.0056920051574707,
1322
+ "eval_runtime": 74.0404,
1323
+ "eval_samples_per_second": 13.911,
1324
+ "eval_steps_per_second": 13.911,
1325
+ "step": 180
1326
+ },
1327
+ {
1328
+ "epoch": 3.0126582278481013,
1329
+ "grad_norm": 0.06396484375,
1330
+ "learning_rate": 4.174558763026048e-06,
1331
+ "loss": 1.0021,
1332
+ "step": 181
1333
+ },
1334
+ {
1335
+ "epoch": 3.029535864978903,
1336
+ "grad_norm": 0.06201171875,
1337
+ "learning_rate": 4.031274666322372e-06,
1338
+ "loss": 0.9997,
1339
+ "step": 182
1340
+ },
1341
+ {
1342
+ "epoch": 3.0464135021097047,
1343
+ "grad_norm": 0.0595703125,
1344
+ "learning_rate": 3.8901100628561755e-06,
1345
+ "loss": 1.0048,
1346
+ "step": 183
1347
+ },
1348
+ {
1349
+ "epoch": 3.0632911392405062,
1350
+ "grad_norm": 0.060302734375,
1351
+ "learning_rate": 3.7510922299466818e-06,
1352
+ "loss": 0.9874,
1353
+ "step": 184
1354
+ },
1355
+ {
1356
+ "epoch": 3.080168776371308,
1357
+ "grad_norm": 0.05810546875,
1358
+ "learning_rate": 3.6142480300913805e-06,
1359
+ "loss": 1.0054,
1360
+ "step": 185
1361
+ },
1362
+ {
1363
+ "epoch": 3.0970464135021096,
1364
+ "grad_norm": 0.059814453125,
1365
+ "learning_rate": 3.4796039057753703e-06,
1366
+ "loss": 0.9991,
1367
+ "step": 186
1368
+ },
1369
+ {
1370
+ "epoch": 3.1139240506329116,
1371
+ "grad_norm": 0.05859375,
1372
+ "learning_rate": 3.3471858743618615e-06,
1373
+ "loss": 0.9941,
1374
+ "step": 187
1375
+ },
1376
+ {
1377
+ "epoch": 3.130801687763713,
1378
+ "grad_norm": 0.061767578125,
1379
+ "learning_rate": 3.217019523064825e-06,
1380
+ "loss": 1.0032,
1381
+ "step": 188
1382
+ },
1383
+ {
1384
+ "epoch": 3.147679324894515,
1385
+ "grad_norm": 0.060791015625,
1386
+ "learning_rate": 3.089130004004754e-06,
1387
+ "loss": 0.9965,
1388
+ "step": 189
1389
+ },
1390
+ {
1391
+ "epoch": 3.1645569620253164,
1392
+ "grad_norm": 0.05908203125,
1393
+ "learning_rate": 2.96354202934853e-06,
1394
+ "loss": 0.9954,
1395
+ "step": 190
1396
+ },
1397
+ {
1398
+ "epoch": 3.181434599156118,
1399
+ "grad_norm": 0.0595703125,
1400
+ "learning_rate": 2.8402798665342412e-06,
1401
+ "loss": 0.9938,
1402
+ "step": 191
1403
+ },
1404
+ {
1405
+ "epoch": 3.19831223628692,
1406
+ "grad_norm": 0.0615234375,
1407
+ "learning_rate": 2.7193673335819893e-06,
1408
+ "loss": 0.9967,
1409
+ "step": 192
1410
+ },
1411
+ {
1412
+ "epoch": 3.2151898734177213,
1413
+ "grad_norm": 0.061279296875,
1414
+ "learning_rate": 2.600827794491524e-06,
1415
+ "loss": 1.0009,
1416
+ "step": 193
1417
+ },
1418
+ {
1419
+ "epoch": 3.2320675105485233,
1420
+ "grad_norm": 0.0595703125,
1421
+ "learning_rate": 2.4846841547275916e-06,
1422
+ "loss": 0.9902,
1423
+ "step": 194
1424
+ },
1425
+ {
1426
+ "epoch": 3.2489451476793247,
1427
+ "grad_norm": 0.0595703125,
1428
+ "learning_rate": 2.3709588567939118e-06,
1429
+ "loss": 0.9915,
1430
+ "step": 195
1431
+ },
1432
+ {
1433
+ "epoch": 3.2658227848101267,
1434
+ "grad_norm": 0.061767578125,
1435
+ "learning_rate": 2.259673875896585e-06,
1436
+ "loss": 0.993,
1437
+ "step": 196
1438
+ },
1439
+ {
1440
+ "epoch": 3.282700421940928,
1441
+ "grad_norm": 0.060302734375,
1442
+ "learning_rate": 2.150850715697823e-06,
1443
+ "loss": 1.0027,
1444
+ "step": 197
1445
+ },
1446
+ {
1447
+ "epoch": 3.29957805907173,
1448
+ "grad_norm": 0.06005859375,
1449
+ "learning_rate": 2.044510404160774e-06,
1450
+ "loss": 0.9874,
1451
+ "step": 198
1452
+ },
1453
+ {
1454
+ "epoch": 3.3164556962025316,
1455
+ "grad_norm": 0.1572265625,
1456
+ "learning_rate": 1.9406734894862848e-06,
1457
+ "loss": 1.6607,
1458
+ "step": 199
1459
+ },
1460
+ {
1461
+ "epoch": 3.3333333333333335,
1462
+ "grad_norm": 0.0595703125,
1463
+ "learning_rate": 1.8393600361423534e-06,
1464
+ "loss": 1.002,
1465
+ "step": 200
1466
+ },
1467
+ {
1468
+ "epoch": 3.350210970464135,
1469
+ "grad_norm": 0.06103515625,
1470
+ "learning_rate": 1.7405896209870665e-06,
1471
+ "loss": 1.0005,
1472
+ "step": 201
1473
+ },
1474
+ {
1475
+ "epoch": 3.367088607594937,
1476
+ "grad_norm": 0.060302734375,
1477
+ "learning_rate": 1.6443813294857452e-06,
1478
+ "loss": 0.9842,
1479
+ "step": 202
1480
+ },
1481
+ {
1482
+ "epoch": 3.3839662447257384,
1483
+ "grad_norm": 0.06298828125,
1484
+ "learning_rate": 1.550753752023053e-06,
1485
+ "loss": 0.9877,
1486
+ "step": 203
1487
+ },
1488
+ {
1489
+ "epoch": 3.40084388185654,
1490
+ "grad_norm": 0.060546875,
1491
+ "learning_rate": 1.459724980310767e-06,
1492
+ "loss": 0.991,
1493
+ "step": 204
1494
+ },
1495
+ {
1496
+ "epoch": 3.4177215189873418,
1497
+ "grad_norm": 0.060302734375,
1498
+ "learning_rate": 1.3713126038918977e-06,
1499
+ "loss": 0.9839,
1500
+ "step": 205
1501
+ },
1502
+ {
1503
+ "epoch": 3.4345991561181437,
1504
+ "grad_norm": 0.060302734375,
1505
+ "learning_rate": 1.2855337067418576e-06,
1506
+ "loss": 0.9977,
1507
+ "step": 206
1508
+ },
1509
+ {
1510
+ "epoch": 3.451476793248945,
1511
+ "grad_norm": 0.058837890625,
1512
+ "learning_rate": 1.2024048639673225e-06,
1513
+ "loss": 0.9981,
1514
+ "step": 207
1515
+ },
1516
+ {
1517
+ "epoch": 3.4683544303797467,
1518
+ "grad_norm": 0.059814453125,
1519
+ "learning_rate": 1.1219421386033958e-06,
1520
+ "loss": 1.0007,
1521
+ "step": 208
1522
+ },
1523
+ {
1524
+ "epoch": 3.4852320675105486,
1525
+ "grad_norm": 0.06298828125,
1526
+ "learning_rate": 1.0441610785097471e-06,
1527
+ "loss": 0.9977,
1528
+ "step": 209
1529
+ },
1530
+ {
1531
+ "epoch": 3.50210970464135,
1532
+ "grad_norm": 0.0615234375,
1533
+ "learning_rate": 9.690767133662976e-07,
1534
+ "loss": 0.9898,
1535
+ "step": 210
1536
+ },
1537
+ {
1538
+ "epoch": 3.50210970464135,
1539
+ "eval_loss": 1.0054612159729004,
1540
+ "eval_runtime": 74.055,
1541
+ "eval_samples_per_second": 13.909,
1542
+ "eval_steps_per_second": 13.909,
1543
+ "step": 210
1544
+ }
1545
+ ],
1546
+ "logging_steps": 1,
1547
+ "max_steps": 236,
1548
+ "num_input_tokens_seen": 0,
1549
+ "num_train_epochs": 4,
1550
+ "save_steps": 30,
1551
+ "stateful_callbacks": {
1552
+ "TrainerControl": {
1553
+ "args": {
1554
+ "should_epoch_stop": false,
1555
+ "should_evaluate": false,
1556
+ "should_log": false,
1557
+ "should_save": true,
1558
+ "should_training_stop": false
1559
+ },
1560
+ "attributes": {}
1561
+ }
1562
+ },
1563
+ "total_flos": 5.905260842110157e+17,
1564
+ "train_batch_size": 1,
1565
+ "trial_name": null,
1566
+ "trial_params": null
1567
+ }
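
The trainer state above is plain JSON, so the training and eval curves can be recovered directly from `log_history`. A minimal sketch using only the standard library, assuming the checkpoint directory has been fetched locally (every key used below appears in the state shown above):

```python
import json

# Load the trainer state saved alongside checkpoint-210.
with open("checkpoint-210/trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training records (with "loss") and eval records
# (with "eval_loss"); split them by which key each entry carries.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"trained {state['epoch']:.2f} epochs, {state['global_step']} of {state['max_steps']} steps")
print(f"final train loss @ step {train[-1][0]}: {train[-1][1]:.4f}")
print(f"final eval  loss @ step {evals[-1][0]}: {evals[-1][1]:.4f}")
```

On the records above this gives an eval loss falling from 1.186 at step 1 to about 1.005 at step 210, essentially flat after step 150; the isolated training-loss spikes (steps 17, 37, 121, 199) never show up in the eval curve.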
checkpoint-210/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47fe3d8f86dd99270fe973ee57cf8bc56524ac1c04eb16ea3572e34a069a173f
3
+ size 6072
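
Like the other binary files in this upload, `training_args.bin` is stored as a Git LFS pointer: three lines giving the spec version, the SHA-256 of the real payload, and its size in bytes. A minimal sketch of verifying a fetched payload against such a pointer (paths are illustrative):

```python
import hashlib

def verify_lfs(pointer_path: str, payload_path: str) -> bool:
    # Parse the three pointer lines: "version <url>", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1) for line in open(pointer_path).read().splitlines() if line)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    # Hash the payload in 1 MiB chunks so large files never sit fully in memory.
    h = hashlib.sha256()
    size = 0
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size
```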
checkpoint-210/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-236/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: /workspace/data/models/Qwen2-7B
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.11.1
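
The card's "How to Get Started" section is still a placeholder; in practice a PEFT checkpoint like this one is used by attaching the adapter to the base model. A hedged sketch, assuming the base model is available under its Hub id (the adapter config below records only a local path) and that bf16 is acceptable:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = "Qwen/Qwen2-7B"      # assumed Hub id for the recorded local base-model path
adapter = "checkpoint-236"  # this checkpoint directory

tokenizer = AutoTokenizer.from_pretrained(adapter)  # tokenizer files ship with the checkpoint
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(model, adapter)   # attaches the LoRA weights

out = model.generate(**tokenizer("Hello", return_tensors="pt"), max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```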
checkpoint-236/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "/workspace/data/models/Qwen2-7B",
5
+ "bias": "none",
6
+ "fan_in_fan_out": null,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 16,
14
+ "lora_dropout": 0.05,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 32,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "k_proj",
24
+ "up_proj",
25
+ "down_proj",
26
+ "q_proj",
27
+ "gate_proj",
28
+ "v_proj",
29
+ "o_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
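
The adapter config pins the LoRA geometry: rank 32, alpha 16, dropout 0.05, with all seven attention and MLP projections targeted. Since `use_rslora` is false, the standard parameterization applies and the update is scaled by alpha/r = 16/32 = 0.5. A small sketch of reading the same values back through PEFT (which resolves this JSON to a LoraConfig):

```python
from peft import PeftConfig

cfg = PeftConfig.from_pretrained("checkpoint-236")            # resolves to a LoraConfig
print(cfg.r, cfg.lora_alpha, cfg.lora_dropout)                # 32 16 0.05
print(sorted(cfg.target_modules))                             # the seven projections listed above
print("effective scaling alpha/r =", cfg.lora_alpha / cfg.r)  # 0.5
```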
checkpoint-236/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd9d908415603516e2c6b008cd73797354cdb39ec4cbe0fe8b67f965c3e2e248
3
+ size 161533584
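
At roughly 161 MB, the adapter file holds only the LoRA matrices, not the 7B base weights. A sketch of listing its tensors without loading them, assuming the LFS payload has been fetched and the `safetensors` package is installed:

```python
from safetensors import safe_open

# safe_open reads the header only; no tensor data is materialized here.
with safe_open("checkpoint-236/adapter_model.safetensors", framework="pt") as f:
    names = list(f.keys())

print(len(names), "tensors")
print(names[:4])  # LoRA A/B pairs for the targeted projections
```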
checkpoint-236/added_tokens.json ADDED
@@ -0,0 +1,5 @@
1
+ {
2
+ "<|endoftext|>": 151643,
3
+ "<|im_end|>": 151645,
4
+ "<|im_start|>": 151644
5
+ }
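
`added_tokens.json` maps Qwen2's chat-control tokens to fixed ids. A quick check that the checkpoint's tokenizer resolves them as recorded (assumes `transformers` is installed):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-236")
for t in ("<|endoftext|>", "<|im_start|>", "<|im_end|>"):
    print(t, tok.convert_tokens_to_ids(t))
# Expected, per added_tokens.json: 151643, 151644, 151645
```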
checkpoint-236/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-236/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36bf5d761c2d90706ab47dab1d09eff6644e44da1c6c5e3291f1629c9bd09717
3
+ size 323292010
checkpoint-236/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:745ac19464ee4024c970aee16279b92c9cc18edf13e5d5f1fb84d36ccbb42966
3
+ size 14244