NekoMikoReimu committed on
Commit fcf26cd
1 Parent(s): 0438679

Delete checkpoint-618

checkpoint-618/config.json DELETED
@@ -1,28 +0,0 @@
- {
- "_name_or_path": "cyberagent/calm2-7b-chat",
- "architectures": [
- "LlamaForCausalLM"
- ],
- "attention_bias": false,
- "bos_token_id": 0,
- "eos_token_id": 0,
- "hidden_act": "silu",
- "hidden_size": 4096,
- "initializer_range": 0.02,
- "intermediate_size": 11008,
- "max_position_embeddings": 32768,
- "model_type": "llama",
- "num_attention_heads": 32,
- "num_hidden_layers": 32,
- "num_key_value_heads": 32,
- "pad_token_id": 1,
- "pretraining_tp": 1,
- "rms_norm_eps": 1e-06,
- "rope_scaling": null,
- "rope_theta": 500000,
- "tie_word_embeddings": false,
- "torch_dtype": "bfloat16",
- "transformers_version": "4.34.1",
- "use_cache": false,
- "vocab_size": 65024
- }
 
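Note: the deleted config.json above describes a LLaMA-architecture model fine-tuned from cyberagent/calm2-7b-chat (32 layers, hidden size 4096, 65,024-token vocabulary, bfloat16 weights). As a minimal sketch, a checkpoint directory like this one would normally be inspected with the transformers library as below; the local path is hypothetical.

```python
# Minimal sketch: inspecting a checkpoint directory like the deleted
# checkpoint-618 with transformers (the local path is hypothetical).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("checkpoint-618")  # parses config.json
print(config.model_type)         # "llama"
print(config.num_hidden_layers)  # 32
print(config.vocab_size)         # 65024
```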
checkpoint-618/generation_config.json DELETED
@@ -1,7 +0,0 @@
- {
- "_from_model_config": true,
- "bos_token_id": 0,
- "eos_token_id": 0,
- "pad_token_id": 1,
- "transformers_version": "4.34.1"
- }
 
checkpoint-618/global_step618/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:cfb4514c3718fcaf0b44817b8b6282dfd700e0d12b937ecbf66871de6ff19c09
- size 28035802551
 
checkpoint-618/global_step618/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6b9290cc1480f6205a0c20b10d09202764a1fa808d4fdd800776a1625bee45ce
- size 28035803191
 
checkpoint-618/global_step618/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f36ceff325ba6cd1535f1dfceccd0fd34a624bd63b1f33ee5278c713aee01c6f
- size 28035802743
 
checkpoint-618/global_step618/mp_rank_00_model_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ae8e77bdb20476e11de017ea64f26b80360300bcbfa293b237730d077a5329e1
- size 14017976195
 
checkpoint-618/latest DELETED
@@ -1 +0,0 @@
- global_step618
 
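Note: `latest` and the `global_step618/` files above follow DeepSpeed's checkpoint layout: `latest` names the most recent step directory, and the three `bf16_zero_pp_rank_*_optim_states.pt` files are ZeRO optimizer-state shards, one per data-parallel rank (~28 GB each). Assuming a standard DeepSpeed setup, a consolidated fp32 state dict could be recovered along these lines:

```python
# Sketch, assuming a standard DeepSpeed installation: consolidate the
# ZeRO shards into one fp32 state dict. The path is hypothetical; the
# checkpoint tag ("global_step618") is read from the "latest" file.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("checkpoint-618")
```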
checkpoint-618/pytorch_model-00001-of-00002.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:245595c67e8712c740f414577e89a49e851bc1110aef9b159f85ad73d7bf63c1
- size 9976594142
 
checkpoint-618/pytorch_model-00002-of-00002.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9b965f08ffaf99a0f921d52572ee779b6be1f96afb3031cad865be6cbb5bfe6f
- size 4041391035
 
checkpoint-618/pytorch_model.bin.index.json DELETED
@@ -1,266 +0,0 @@
- {
- "metadata": {
- "total_size": 14017896448
- },
- "weight_map": {
- "lm_head.weight": "pytorch_model-00002-of-00002.bin",
- "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.23.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.23.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.24.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.24.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.3.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.3.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.30.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.30.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.mlp.swiglu.w12.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.mlp.swiglu.w3.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
- "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.4.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.4.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.mlp.swiglu.w12.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.mlp.swiglu.w3.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
- "model.norm.weight": "pytorch_model-00002-of-00002.bin"
- }
- }
 
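Note: the deleted index above maps every parameter to one of the two shards (total_size 14017896448 bytes, consistent with a 7B-parameter model in bfloat16). The `mlp.swiglu.w12` / `mlp.swiglu.w3` key names differ from stock LlamaForCausalLM naming (`gate_proj`/`up_proj`/`down_proj`), which suggests a fused-SwiGLU training patch, so loading these shards into a vanilla transformers model would likely require remapping those keys. A minimal sketch of how such an index resolves a tensor to its shard (hypothetical paths, mirroring what from_pretrained does internally):

```python
# Minimal sketch: resolve one tensor through pytorch_model.bin.index.json
# (hypothetical local paths).
import json
import os

import torch

ckpt = "checkpoint-618"
with open(os.path.join(ckpt, "pytorch_model.bin.index.json")) as f:
    index = json.load(f)

name = "model.norm.weight"
shard_file = index["weight_map"][name]  # "pytorch_model-00002-of-00002.bin"
shard = torch.load(os.path.join(ckpt, shard_file), map_location="cpu")
tensor = shard[name]
print(tensor.shape)  # expected: torch.Size([4096])
```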
checkpoint-618/rng_state_0.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4f4f4eabd3d3209be5ecfa7748b59c9bcebe66f8280e04423295c3adb56fdda8
- size 16631
 
checkpoint-618/rng_state_1.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0e5d459c15b3659d339b29f90d9c6d4fdbf6c828b592cb47110d9ed8c71e113f
- size 16631
 
checkpoint-618/rng_state_2.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7b4bd6bae9c008f0e9d18f4569f4dddb1adc43f27c07518ce3f88803299dc53b
- size 16631
 
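Note: the three small rng_state_*.pth files above (one per rank, 16,631 bytes each) hold the random-number-generator states that transformers' Trainer saves so a resumed run reproduces the same data order and dropout masks. A sketch of inspecting one follows; the exact dictionary keys are an assumption based on common Trainer checkpoints, not read from this repository.

```python
# Sketch: inspecting a saved RNG-state file (hypothetical path; the key
# names below are an assumption, not read from this repository).
import torch

rng = torch.load("checkpoint-618/rng_state_0.pth")
print(rng.keys())                # typically: "python", "numpy", "cpu", "cuda"
torch.set_rng_state(rng["cpu"])  # e.g., restore the CPU generator
```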
checkpoint-618/trainer_state.json DELETED
@@ -1,3887 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 2.9611650485436893,
- "eval_steps": 31,
- "global_step": 618,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.0,
- "learning_rate": 0.0,
- "loss": 4.59,
- "step": 1
- },
- {
- "epoch": 0.0,
- "eval_loss": 4.240383148193359,
- "eval_runtime": 7.311,
- "eval_samples_per_second": 164.273,
- "eval_steps_per_second": 54.849,
- "step": 1
- },
- {
- "epoch": 0.01,
- "learning_rate": 2.0000000000000003e-06,
- "loss": 4.252,
- "step": 2
- },
- {
- "epoch": 0.01,
- "learning_rate": 4.000000000000001e-06,
- "loss": 4.2054,
- "step": 3
- },
- {
- "epoch": 0.02,
- "learning_rate": 6e-06,
- "loss": 4.1624,
- "step": 4
- },
- {
- "epoch": 0.02,
- "learning_rate": 8.000000000000001e-06,
- "loss": 3.9787,
- "step": 5
- },
- {
- "epoch": 0.03,
- "learning_rate": 1e-05,
- "loss": 3.7979,
- "step": 6
- },
- {
- "epoch": 0.03,
- "learning_rate": 1.2e-05,
- "loss": 3.8982,
- "step": 7
- },
- {
- "epoch": 0.04,
- "learning_rate": 1.4000000000000001e-05,
- "loss": 3.805,
- "step": 8
- },
- {
- "epoch": 0.04,
- "learning_rate": 1.6000000000000003e-05,
- "loss": 3.7176,
- "step": 9
- },
- {
- "epoch": 0.05,
- "learning_rate": 1.8e-05,
- "loss": 3.4755,
- "step": 10
- },
- {
- "epoch": 0.05,
- "learning_rate": 2e-05,
- "loss": 3.6401,
- "step": 11
- },
- {
- "epoch": 0.06,
- "learning_rate": 2.2000000000000003e-05,
- "loss": 3.5615,
- "step": 12
- },
- {
- "epoch": 0.06,
- "learning_rate": 2.4e-05,
- "loss": 3.5286,
- "step": 13
- },
- {
- "epoch": 0.07,
- "learning_rate": 2.6000000000000002e-05,
- "loss": 3.5437,
- "step": 14
- },
- {
- "epoch": 0.07,
- "learning_rate": 2.8000000000000003e-05,
- "loss": 3.5163,
- "step": 15
- },
- {
- "epoch": 0.08,
- "learning_rate": 3e-05,
- "loss": 3.4108,
- "step": 16
- },
- {
- "epoch": 0.08,
- "learning_rate": 3.2000000000000005e-05,
- "loss": 3.3637,
- "step": 17
- },
- {
- "epoch": 0.09,
- "learning_rate": 3.4000000000000007e-05,
- "loss": 3.3538,
- "step": 18
- },
- {
- "epoch": 0.09,
- "learning_rate": 3.6e-05,
- "loss": 3.3819,
- "step": 19
- },
- {
- "epoch": 0.1,
- "learning_rate": 3.8e-05,
- "loss": 3.2511,
- "step": 20
- },
- {
- "epoch": 0.1,
- "learning_rate": 4e-05,
- "loss": 3.3211,
- "step": 21
- },
- {
- "epoch": 0.11,
- "learning_rate": 4.2e-05,
- "loss": 3.2764,
- "step": 22
- },
- {
- "epoch": 0.11,
- "learning_rate": 4.4000000000000006e-05,
- "loss": 3.0653,
- "step": 23
- },
- {
- "epoch": 0.12,
- "learning_rate": 4.600000000000001e-05,
- "loss": 3.0859,
- "step": 24
- },
- {
- "epoch": 0.12,
- "learning_rate": 4.8e-05,
- "loss": 3.0804,
- "step": 25
- },
- {
- "epoch": 0.13,
- "learning_rate": 5e-05,
- "loss": 2.9774,
- "step": 26
- },
- {
- "epoch": 0.13,
- "learning_rate": 5.2000000000000004e-05,
- "loss": 2.9269,
- "step": 27
- },
- {
- "epoch": 0.14,
- "learning_rate": 5.4000000000000005e-05,
- "loss": 3.0926,
- "step": 28
- },
- {
- "epoch": 0.14,
- "learning_rate": 5.6000000000000006e-05,
- "loss": 2.9725,
- "step": 29
- },
- {
- "epoch": 0.15,
- "learning_rate": 5.8e-05,
- "loss": 3.0293,
- "step": 30
- },
- {
- "epoch": 0.15,
- "learning_rate": 6e-05,
- "loss": 3.0903,
- "step": 31
- },
- {
- "epoch": 0.15,
- "eval_loss": 2.9962990283966064,
- "eval_runtime": 7.3671,
- "eval_samples_per_second": 163.022,
- "eval_steps_per_second": 54.431,
- "step": 31
- },
- {
- "epoch": 0.16,
- "learning_rate": 6.2e-05,
- "loss": 2.9903,
- "step": 32
- },
- {
- "epoch": 0.16,
- "learning_rate": 6.400000000000001e-05,
- "loss": 3.0196,
- "step": 33
- },
- {
- "epoch": 0.17,
- "learning_rate": 6.6e-05,
- "loss": 3.0288,
- "step": 34
- },
- {
- "epoch": 0.17,
- "learning_rate": 6.800000000000001e-05,
- "loss": 3.0071,
- "step": 35
- },
- {
- "epoch": 0.17,
- "learning_rate": 7e-05,
- "loss": 3.0393,
- "step": 36
- },
- {
- "epoch": 0.18,
- "learning_rate": 7.2e-05,
- "loss": 2.9937,
- "step": 37
- },
- {
- "epoch": 0.18,
- "learning_rate": 7.4e-05,
- "loss": 2.9988,
- "step": 38
- },
- {
- "epoch": 0.19,
- "learning_rate": 7.6e-05,
- "loss": 2.9331,
- "step": 39
- },
- {
- "epoch": 0.19,
- "learning_rate": 7.800000000000001e-05,
- "loss": 3.0414,
- "step": 40
- },
- {
- "epoch": 0.2,
- "learning_rate": 8e-05,
- "loss": 3.0237,
- "step": 41
- },
- {
- "epoch": 0.2,
- "learning_rate": 8.2e-05,
- "loss": 2.9664,
- "step": 42
- },
- {
- "epoch": 0.21,
- "learning_rate": 8.4e-05,
- "loss": 2.8639,
- "step": 43
- },
- {
- "epoch": 0.21,
- "learning_rate": 8.6e-05,
- "loss": 2.8562,
- "step": 44
- },
- {
- "epoch": 0.22,
- "learning_rate": 8.800000000000001e-05,
- "loss": 2.9632,
- "step": 45
- },
- {
- "epoch": 0.22,
- "learning_rate": 9e-05,
- "loss": 2.946,
- "step": 46
- },
- {
- "epoch": 0.23,
- "learning_rate": 9.200000000000001e-05,
- "loss": 2.8428,
- "step": 47
- },
- {
- "epoch": 0.23,
- "learning_rate": 9.4e-05,
- "loss": 2.9827,
- "step": 48
- },
- {
- "epoch": 0.24,
- "learning_rate": 9.6e-05,
- "loss": 2.9512,
- "step": 49
- },
- {
- "epoch": 0.24,
- "learning_rate": 9.8e-05,
- "loss": 2.8997,
- "step": 50
- },
- {
- "epoch": 0.25,
- "learning_rate": 0.0001,
- "loss": 2.9762,
- "step": 51
- },
- {
- "epoch": 0.25,
- "learning_rate": 0.00010200000000000001,
- "loss": 3.0429,
- "step": 52
- },
- {
- "epoch": 0.26,
- "learning_rate": 0.00010400000000000001,
- "loss": 3.0223,
- "step": 53
- },
- {
- "epoch": 0.26,
- "learning_rate": 0.00010600000000000002,
- "loss": 3.0007,
- "step": 54
- },
- {
- "epoch": 0.27,
- "learning_rate": 0.00010800000000000001,
- "loss": 3.0436,
- "step": 55
- },
- {
- "epoch": 0.27,
- "learning_rate": 0.00011000000000000002,
- "loss": 3.0151,
- "step": 56
- },
- {
- "epoch": 0.28,
- "learning_rate": 0.00011200000000000001,
- "loss": 2.9909,
- "step": 57
- },
- {
- "epoch": 0.28,
- "learning_rate": 0.00011399999999999999,
- "loss": 2.9942,
- "step": 58
- },
- {
- "epoch": 0.29,
- "learning_rate": 0.000116,
- "loss": 3.0098,
- "step": 59
- },
- {
- "epoch": 0.29,
- "learning_rate": 0.000118,
- "loss": 3.0353,
- "step": 60
- },
- {
- "epoch": 0.3,
- "learning_rate": 0.00012,
- "loss": 3.0671,
- "step": 61
- },
- {
- "epoch": 0.3,
- "learning_rate": 0.000122,
- "loss": 2.9824,
- "step": 62
- },
- {
- "epoch": 0.3,
- "eval_loss": 3.022158145904541,
- "eval_runtime": 7.3702,
- "eval_samples_per_second": 162.953,
- "eval_steps_per_second": 54.408,
- "step": 62
- },
- {
- "epoch": 0.31,
- "learning_rate": 0.000124,
- "loss": 3.0207,
- "step": 63
- },
- {
- "epoch": 0.31,
- "learning_rate": 0.000126,
- "loss": 2.9048,
- "step": 64
- },
- {
- "epoch": 0.32,
- "learning_rate": 0.00012800000000000002,
- "loss": 3.0518,
- "step": 65
- },
- {
- "epoch": 0.32,
- "learning_rate": 0.00013000000000000002,
- "loss": 3.0854,
- "step": 66
- },
- {
- "epoch": 0.33,
- "learning_rate": 0.000132,
- "loss": 3.0317,
- "step": 67
- },
- {
- "epoch": 0.33,
- "learning_rate": 0.000134,
- "loss": 3.0313,
- "step": 68
- },
- {
- "epoch": 0.33,
- "learning_rate": 0.00013600000000000003,
- "loss": 3.0753,
- "step": 69
- },
- {
- "epoch": 0.34,
- "learning_rate": 0.000138,
- "loss": 2.9999,
- "step": 70
- },
- {
- "epoch": 0.34,
- "learning_rate": 0.00014,
- "loss": 3.0423,
- "step": 71
- },
- {
- "epoch": 0.35,
- "learning_rate": 0.000142,
- "loss": 2.9642,
- "step": 72
- },
- {
- "epoch": 0.35,
- "learning_rate": 0.000144,
- "loss": 2.9575,
- "step": 73
- },
- {
- "epoch": 0.36,
- "learning_rate": 0.000146,
- "loss": 2.9854,
- "step": 74
- },
- {
- "epoch": 0.36,
- "learning_rate": 0.000148,
- "loss": 2.9729,
- "step": 75
- },
- {
- "epoch": 0.37,
- "learning_rate": 0.00015000000000000001,
- "loss": 2.9176,
- "step": 76
- },
- {
- "epoch": 0.37,
- "learning_rate": 0.000152,
- "loss": 2.947,
- "step": 77
- },
- {
- "epoch": 0.38,
- "learning_rate": 0.000154,
- "loss": 3.0542,
- "step": 78
- },
- {
- "epoch": 0.38,
- "learning_rate": 0.00015600000000000002,
- "loss": 3.0718,
- "step": 79
- },
- {
- "epoch": 0.39,
- "learning_rate": 0.00015800000000000002,
- "loss": 3.027,
- "step": 80
- },
- {
- "epoch": 0.39,
- "learning_rate": 0.00016,
- "loss": 3.1764,
- "step": 81
- },
- {
- "epoch": 0.4,
- "learning_rate": 0.000162,
- "loss": 3.1091,
- "step": 82
- },
- {
- "epoch": 0.4,
- "learning_rate": 0.000164,
- "loss": 3.0931,
- "step": 83
- },
- {
- "epoch": 0.41,
- "learning_rate": 0.000166,
- "loss": 3.2712,
- "step": 84
- },
- {
- "epoch": 0.41,
- "learning_rate": 0.000168,
- "loss": 3.3353,
- "step": 85
- },
- {
- "epoch": 0.42,
- "learning_rate": 0.00017,
- "loss": 3.4876,
- "step": 86
- },
- {
- "epoch": 0.42,
- "learning_rate": 0.000172,
- "loss": 3.3383,
- "step": 87
- },
- {
- "epoch": 0.43,
- "learning_rate": 0.000174,
- "loss": 3.1497,
- "step": 88
- },
- {
- "epoch": 0.43,
- "learning_rate": 0.00017600000000000002,
- "loss": 3.1029,
- "step": 89
- },
- {
- "epoch": 0.44,
- "learning_rate": 0.00017800000000000002,
- "loss": 3.1484,
- "step": 90
- },
- {
- "epoch": 0.44,
- "learning_rate": 0.00018,
- "loss": 3.1156,
- "step": 91
- },
- {
- "epoch": 0.45,
- "learning_rate": 0.000182,
- "loss": 3.2557,
- "step": 92
- },
- {
- "epoch": 0.45,
- "learning_rate": 0.00018400000000000003,
- "loss": 3.173,
- "step": 93
- },
- {
- "epoch": 0.45,
- "eval_loss": 3.150902032852173,
- "eval_runtime": 7.5676,
- "eval_samples_per_second": 158.703,
- "eval_steps_per_second": 52.989,
- "step": 93
- },
- {
- "epoch": 0.46,
- "learning_rate": 0.00018600000000000002,
- "loss": 3.128,
- "step": 94
- },
- {
- "epoch": 0.46,
- "learning_rate": 0.000188,
- "loss": 3.146,
- "step": 95
- },
- {
- "epoch": 0.47,
- "learning_rate": 0.00019,
- "loss": 3.194,
- "step": 96
- },
- {
- "epoch": 0.47,
- "learning_rate": 0.000192,
- "loss": 3.0987,
- "step": 97
- },
- {
- "epoch": 0.48,
- "learning_rate": 0.000194,
- "loss": 3.2405,
- "step": 98
- },
- {
- "epoch": 0.48,
- "learning_rate": 0.000196,
- "loss": 3.1568,
- "step": 99
- },
- {
- "epoch": 0.49,
- "learning_rate": 0.00019800000000000002,
- "loss": 3.1488,
- "step": 100
- },
- {
- "epoch": 0.49,
- "learning_rate": 0.0002,
- "loss": 3.2105,
- "step": 101
- },
- {
- "epoch": 0.5,
- "learning_rate": 0.0001996138996138996,
- "loss": 3.2575,
- "step": 102
- },
- {
- "epoch": 0.5,
- "learning_rate": 0.00019922779922779924,
- "loss": 3.1921,
- "step": 103
- },
- {
- "epoch": 0.5,
- "learning_rate": 0.00019884169884169884,
- "loss": 3.2369,
- "step": 104
- },
- {
- "epoch": 0.51,
- "learning_rate": 0.00019845559845559847,
- "loss": 3.1031,
- "step": 105
- },
- {
- "epoch": 0.51,
- "learning_rate": 0.00019806949806949807,
- "loss": 3.2618,
- "step": 106
- },
- {
- "epoch": 0.52,
- "learning_rate": 0.0001976833976833977,
- "loss": 3.2034,
- "step": 107
- },
- {
- "epoch": 0.52,
- "learning_rate": 0.0001972972972972973,
- "loss": 3.2094,
- "step": 108
- },
- {
- "epoch": 0.53,
- "learning_rate": 0.00019691119691119693,
- "loss": 3.1894,
- "step": 109
- },
- {
- "epoch": 0.53,
- "learning_rate": 0.00019652509652509653,
- "loss": 3.1614,
- "step": 110
- },
- {
- "epoch": 0.54,
- "learning_rate": 0.00019613899613899616,
- "loss": 3.176,
- "step": 111
- },
- {
- "epoch": 0.54,
- "learning_rate": 0.00019575289575289576,
- "loss": 3.2153,
- "step": 112
- },
- {
- "epoch": 0.55,
- "learning_rate": 0.0001953667953667954,
- "loss": 3.0923,
- "step": 113
- },
- {
- "epoch": 0.55,
- "learning_rate": 0.000194980694980695,
- "loss": 3.2878,
- "step": 114
- },
- {
- "epoch": 0.56,
- "learning_rate": 0.00019459459459459462,
- "loss": 3.0605,
- "step": 115
- },
- {
- "epoch": 0.56,
- "learning_rate": 0.00019420849420849422,
- "loss": 3.1282,
- "step": 116
- },
- {
- "epoch": 0.57,
- "learning_rate": 0.00019382239382239382,
- "loss": 3.1204,
- "step": 117
- },
- {
- "epoch": 0.57,
- "learning_rate": 0.00019343629343629345,
- "loss": 3.0932,
- "step": 118
- },
- {
- "epoch": 0.58,
- "learning_rate": 0.00019305019305019305,
- "loss": 3.2913,
- "step": 119
- },
- {
- "epoch": 0.58,
- "learning_rate": 0.00019266409266409268,
- "loss": 3.1809,
- "step": 120
- },
- {
- "epoch": 0.59,
- "learning_rate": 0.00019227799227799228,
- "loss": 3.2408,
- "step": 121
- },
- {
- "epoch": 0.59,
- "learning_rate": 0.0001918918918918919,
- "loss": 3.8133,
- "step": 122
- },
- {
- "epoch": 0.6,
- "learning_rate": 0.0001915057915057915,
- "loss": 3.1869,
- "step": 123
- },
- {
- "epoch": 0.6,
- "learning_rate": 0.00019111969111969114,
- "loss": 3.1053,
- "step": 124
- },
- {
- "epoch": 0.6,
- "eval_loss": 3.205463409423828,
- "eval_runtime": 7.3677,
- "eval_samples_per_second": 163.009,
- "eval_steps_per_second": 54.427,
- "step": 124
- },
- {
- "epoch": 0.61,
- "learning_rate": 0.00019073359073359074,
- "loss": 3.1206,
- "step": 125
- },
- {
- "epoch": 0.61,
- "learning_rate": 0.00019034749034749037,
- "loss": 3.1233,
- "step": 126
- },
- {
- "epoch": 0.62,
- "learning_rate": 0.00018996138996138997,
- "loss": 3.0673,
- "step": 127
- },
- {
- "epoch": 0.62,
- "learning_rate": 0.0001895752895752896,
- "loss": 3.1314,
- "step": 128
- },
- {
- "epoch": 0.63,
- "learning_rate": 0.0001891891891891892,
- "loss": 3.1997,
- "step": 129
- },
- {
- "epoch": 0.63,
- "learning_rate": 0.0001888030888030888,
- "loss": 3.1298,
- "step": 130
- },
- {
- "epoch": 0.64,
- "learning_rate": 0.00018841698841698843,
- "loss": 3.1821,
- "step": 131
- },
- {
- "epoch": 0.64,
- "learning_rate": 0.00018803088803088803,
- "loss": 3.2418,
- "step": 132
- },
- {
- "epoch": 0.65,
- "learning_rate": 0.00018764478764478766,
- "loss": 3.1543,
- "step": 133
- },
- {
- "epoch": 0.65,
- "learning_rate": 0.00018725868725868726,
- "loss": 3.2136,
- "step": 134
- },
- {
- "epoch": 0.66,
- "learning_rate": 0.0001868725868725869,
- "loss": 3.3314,
- "step": 135
- },
- {
- "epoch": 0.66,
- "learning_rate": 0.0001864864864864865,
- "loss": 3.2328,
- "step": 136
- },
- {
- "epoch": 0.67,
- "learning_rate": 0.00018610038610038612,
- "loss": 3.2225,
- "step": 137
- },
- {
- "epoch": 0.67,
- "learning_rate": 0.00018571428571428572,
- "loss": 3.1159,
- "step": 138
- },
- {
- "epoch": 0.67,
- "learning_rate": 0.00018532818532818535,
- "loss": 3.0339,
- "step": 139
- },
- {
- "epoch": 0.68,
- "learning_rate": 0.00018494208494208495,
- "loss": 3.2672,
- "step": 140
- },
- {
- "epoch": 0.68,
- "learning_rate": 0.00018455598455598458,
- "loss": 3.1237,
- "step": 141
- },
- {
- "epoch": 0.69,
- "learning_rate": 0.00018416988416988418,
- "loss": 3.1692,
- "step": 142
- },
- {
- "epoch": 0.69,
- "learning_rate": 0.0001837837837837838,
- "loss": 3.3243,
- "step": 143
- },
- {
- "epoch": 0.7,
- "learning_rate": 0.0001833976833976834,
- "loss": 3.0264,
- "step": 144
- },
- {
- "epoch": 0.7,
- "learning_rate": 0.000183011583011583,
- "loss": 3.2045,
- "step": 145
- },
- {
- "epoch": 0.71,
- "learning_rate": 0.00018262548262548264,
- "loss": 3.1966,
- "step": 146
- },
- {
- "epoch": 0.71,
- "learning_rate": 0.00018223938223938224,
- "loss": 3.0587,
- "step": 147
- },
- {
- "epoch": 0.72,
- "learning_rate": 0.00018185328185328187,
- "loss": 3.2979,
- "step": 148
- },
- {
- "epoch": 0.72,
- "learning_rate": 0.00018146718146718147,
- "loss": 3.1549,
- "step": 149
- },
- {
- "epoch": 0.73,
- "learning_rate": 0.0001810810810810811,
- "loss": 3.1682,
- "step": 150
- },
- {
- "epoch": 0.73,
- "learning_rate": 0.0001806949806949807,
- "loss": 3.3214,
- "step": 151
- },
- {
- "epoch": 0.74,
- "learning_rate": 0.00018030888030888032,
- "loss": 3.1783,
- "step": 152
- },
- {
- "epoch": 0.74,
- "learning_rate": 0.00017992277992277993,
- "loss": 3.2268,
- "step": 153
- },
- {
- "epoch": 0.75,
- "learning_rate": 0.00017953667953667955,
- "loss": 3.2843,
- "step": 154
- },
- {
- "epoch": 0.75,
- "learning_rate": 0.00017915057915057916,
- "loss": 3.3124,
- "step": 155
- },
- {
- "epoch": 0.75,
- "eval_loss": 3.1898298263549805,
- "eval_runtime": 7.5884,
- "eval_samples_per_second": 158.268,
- "eval_steps_per_second": 52.844,
- "step": 155
- },
- {
- "epoch": 0.76,
- "learning_rate": 0.00017876447876447878,
- "loss": 3.1855,
- "step": 156
- },
- {
- "epoch": 0.76,
- "learning_rate": 0.00017837837837837839,
- "loss": 3.2096,
- "step": 157
- },
- {
- "epoch": 0.77,
- "learning_rate": 0.00017799227799227801,
- "loss": 3.2174,
- "step": 158
- },
- {
- "epoch": 0.77,
- "learning_rate": 0.00017760617760617762,
- "loss": 3.2775,
- "step": 159
- },
- {
- "epoch": 0.78,
- "learning_rate": 0.00017722007722007722,
- "loss": 3.2065,
- "step": 160
- },
- {
- "epoch": 0.78,
- "learning_rate": 0.00017683397683397684,
- "loss": 3.2905,
- "step": 161
- },
- {
- "epoch": 0.79,
- "learning_rate": 0.00017644787644787645,
- "loss": 3.1591,
- "step": 162
- },
- {
- "epoch": 0.79,
- "learning_rate": 0.00017606177606177607,
- "loss": 3.2721,
- "step": 163
- },
- {
- "epoch": 0.8,
- "learning_rate": 0.00017567567567567568,
- "loss": 3.1743,
- "step": 164
- },
- {
- "epoch": 0.8,
- "learning_rate": 0.0001752895752895753,
- "loss": 3.234,
- "step": 165
- },
- {
- "epoch": 0.81,
- "learning_rate": 0.0001749034749034749,
- "loss": 3.2775,
- "step": 166
- },
- {
- "epoch": 0.81,
- "learning_rate": 0.00017451737451737453,
- "loss": 3.2317,
- "step": 167
- },
- {
- "epoch": 0.82,
- "learning_rate": 0.00017413127413127413,
- "loss": 3.0691,
- "step": 168
- },
- {
- "epoch": 0.82,
- "learning_rate": 0.00017374517374517376,
- "loss": 3.1793,
- "step": 169
- },
- {
- "epoch": 0.83,
- "learning_rate": 0.00017335907335907336,
- "loss": 3.2259,
- "step": 170
- },
- {
- "epoch": 0.83,
- "learning_rate": 0.000172972972972973,
- "loss": 3.1813,
- "step": 171
- },
- {
- "epoch": 0.83,
- "learning_rate": 0.0001725868725868726,
- "loss": 3.2416,
- "step": 172
- },
- {
- "epoch": 0.84,
- "learning_rate": 0.0001722007722007722,
- "loss": 3.2016,
- "step": 173
- },
- {
- "epoch": 0.84,
- "learning_rate": 0.00017181467181467182,
- "loss": 3.1766,
- "step": 174
- },
- {
- "epoch": 0.85,
- "learning_rate": 0.00017142857142857143,
- "loss": 3.0861,
- "step": 175
- },
- {
- "epoch": 0.85,
- "learning_rate": 0.00017104247104247105,
- "loss": 3.2104,
- "step": 176
- },
- {
- "epoch": 0.86,
- "learning_rate": 0.00017065637065637065,
- "loss": 3.273,
- "step": 177
- },
- {
- "epoch": 0.86,
- "learning_rate": 0.00017027027027027028,
- "loss": 3.2371,
- "step": 178
- },
- {
- "epoch": 0.87,
- "learning_rate": 0.00016988416988416988,
- "loss": 3.2654,
- "step": 179
- },
- {
- "epoch": 0.87,
- "learning_rate": 0.0001694980694980695,
- "loss": 3.1812,
- "step": 180
- },
- {
- "epoch": 0.88,
- "learning_rate": 0.00016911196911196911,
- "loss": 3.2781,
- "step": 181
- },
- {
- "epoch": 0.88,
- "learning_rate": 0.00016872586872586874,
- "loss": 3.1611,
- "step": 182
- },
- {
- "epoch": 0.89,
- "learning_rate": 0.00016833976833976834,
- "loss": 3.0902,
- "step": 183
- },
- {
- "epoch": 0.89,
- "learning_rate": 0.00016795366795366797,
- "loss": 3.2414,
- "step": 184
- },
- {
- "epoch": 0.9,
- "learning_rate": 0.00016756756756756757,
- "loss": 3.1472,
- "step": 185
- },
- {
- "epoch": 0.9,
- "learning_rate": 0.0001671814671814672,
- "loss": 3.1761,
- "step": 186
- },
- {
- "epoch": 0.9,
- "eval_loss": 3.1637256145477295,
- "eval_runtime": 7.3669,
- "eval_samples_per_second": 163.026,
- "eval_steps_per_second": 54.432,
- "step": 186
- },
- {
- "epoch": 0.91,
- "learning_rate": 0.0001667953667953668,
- "loss": 3.1409,
- "step": 187
- },
- {
- "epoch": 0.91,
- "learning_rate": 0.0001664092664092664,
- "loss": 3.2262,
- "step": 188
- },
- {
- "epoch": 0.92,
- "learning_rate": 0.00016602316602316603,
- "loss": 3.105,
- "step": 189
- },
- {
- "epoch": 0.92,
- "learning_rate": 0.00016563706563706563,
- "loss": 3.2596,
- "step": 190
- },
- {
- "epoch": 0.93,
- "learning_rate": 0.00016525096525096526,
- "loss": 3.1528,
- "step": 191
- },
- {
- "epoch": 0.93,
- "learning_rate": 0.00016486486486486486,
- "loss": 3.1561,
- "step": 192
- },
- {
- "epoch": 0.94,
- "learning_rate": 0.0001644787644787645,
- "loss": 3.2552,
- "step": 193
- },
- {
- "epoch": 0.94,
- "learning_rate": 0.0001640926640926641,
- "loss": 3.0347,
- "step": 194
- },
- {
- "epoch": 0.95,
- "learning_rate": 0.00016370656370656372,
- "loss": 3.0418,
- "step": 195
- },
- {
- "epoch": 0.95,
- "learning_rate": 0.00016332046332046332,
- "loss": 3.0838,
- "step": 196
- },
- {
- "epoch": 0.96,
- "learning_rate": 0.00016293436293436295,
- "loss": 3.1867,
- "step": 197
- },
- {
- "epoch": 0.96,
- "learning_rate": 0.00016254826254826255,
- "loss": 3.0373,
- "step": 198
- },
- {
- "epoch": 0.97,
- "learning_rate": 0.00016216216216216218,
- "loss": 2.9896,
- "step": 199
- },
- {
- "epoch": 0.97,
- "learning_rate": 0.00016177606177606178,
- "loss": 3.1511,
- "step": 200
- },
- {
- "epoch": 0.98,
- "learning_rate": 0.0001613899613899614,
- "loss": 3.1029,
- "step": 201
- },
- {
- "epoch": 0.98,
- "learning_rate": 0.000161003861003861,
- "loss": 3.2193,
- "step": 202
- },
- {
- "epoch": 0.99,
- "learning_rate": 0.0001606177606177606,
- "loss": 3.2214,
- "step": 203
- },
- {
- "epoch": 0.99,
- "learning_rate": 0.00016023166023166024,
- "loss": 3.1428,
- "step": 204
- },
- {
- "epoch": 1.0,
- "learning_rate": 0.00015984555984555984,
- "loss": 3.1259,
- "step": 205
- },
- {
- "epoch": 1.0,
- "learning_rate": 0.00015945945945945947,
- "loss": 3.2007,
- "step": 206
- },
- {
- "epoch": 1.0,
- "learning_rate": 0.00015907335907335907,
- "loss": 3.1123,
- "step": 207
- },
- {
- "epoch": 1.01,
- "learning_rate": 0.0001586872586872587,
- "loss": 3.3417,
- "step": 208
- },
- {
- "epoch": 1.01,
- "learning_rate": 0.0001583011583011583,
- "loss": 3.089,
- "step": 209
- },
- {
- "epoch": 1.02,
- "learning_rate": 0.00015791505791505793,
- "loss": 3.0972,
- "step": 210
- },
- {
- "epoch": 1.0,
- "learning_rate": 0.00015752895752895753,
- "loss": 2.1341,
- "step": 211
- },
- {
- "epoch": 1.01,
- "learning_rate": 0.00015714285714285716,
- "loss": 1.9415,
- "step": 212
- },
- {
- "epoch": 1.01,
- "learning_rate": 0.00015675675675675676,
- "loss": 1.959,
- "step": 213
- },
- {
- "epoch": 1.02,
- "learning_rate": 0.0001563706563706564,
- "loss": 1.857,
- "step": 214
- },
- {
- "epoch": 1.02,
- "learning_rate": 0.000155984555984556,
- "loss": 1.8255,
- "step": 215
- },
- {
- "epoch": 1.03,
- "learning_rate": 0.00015559845559845562,
- "loss": 1.6538,
- "step": 216
- },
- {
- "epoch": 1.03,
- "learning_rate": 0.00015521235521235522,
- "loss": 1.9162,
- "step": 217
- },
- {
- "epoch": 1.03,
- "eval_loss": 3.399775981903076,
- "eval_runtime": 7.3711,
- "eval_samples_per_second": 162.933,
- "eval_steps_per_second": 54.401,
- "step": 217
- },
- {
- "epoch": 1.04,
- "learning_rate": 0.00015482625482625482,
- "loss": 1.8293,
- "step": 218
- },
- {
- "epoch": 1.04,
- "learning_rate": 0.00015444015444015445,
- "loss": 1.8539,
- "step": 219
- },
- {
- "epoch": 1.05,
- "learning_rate": 0.00015405405405405405,
- "loss": 1.7888,
- "step": 220
- },
- {
- "epoch": 1.05,
- "learning_rate": 0.00015366795366795368,
- "loss": 1.7813,
- "step": 221
- },
- {
- "epoch": 1.06,
- "learning_rate": 0.00015328185328185328,
- "loss": 1.8911,
- "step": 222
- },
- {
- "epoch": 1.06,
- "learning_rate": 0.0001528957528957529,
- "loss": 1.839,
- "step": 223
- },
- {
- "epoch": 1.07,
- "learning_rate": 0.0001525096525096525,
- "loss": 1.8223,
- "step": 224
- },
- {
- "epoch": 1.07,
- "learning_rate": 0.00015212355212355214,
- "loss": 1.825,
- "step": 225
- },
- {
- "epoch": 1.08,
- "learning_rate": 0.00015173745173745174,
- "loss": 1.7962,
- "step": 226
- },
- {
- "epoch": 1.08,
- "learning_rate": 0.00015135135135135137,
- "loss": 1.8117,
- "step": 227
- },
- {
- "epoch": 1.09,
- "learning_rate": 0.00015096525096525097,
- "loss": 1.9061,
- "step": 228
- },
- {
- "epoch": 1.09,
- "learning_rate": 0.0001505791505791506,
- "loss": 1.8982,
- "step": 229
- },
- {
- "epoch": 1.1,
- "learning_rate": 0.0001501930501930502,
- "loss": 1.9087,
- "step": 230
- },
- {
- "epoch": 1.1,
- "learning_rate": 0.0001498069498069498,
- "loss": 1.9664,
- "step": 231
- },
- {
- "epoch": 1.11,
- "learning_rate": 0.00014942084942084943,
- "loss": 1.7921,
- "step": 232
- },
- {
- "epoch": 1.11,
- "learning_rate": 0.00014903474903474903,
- "loss": 1.9163,
- "step": 233
- },
- {
- "epoch": 1.12,
- "learning_rate": 0.00014864864864864866,
- "loss": 1.8759,
- "step": 234
- },
- {
- "epoch": 1.12,
- "learning_rate": 0.00014826254826254826,
- "loss": 1.9262,
- "step": 235
- },
- {
- "epoch": 1.13,
- "learning_rate": 0.0001478764478764479,
- "loss": 1.9063,
- "step": 236
- },
- {
- "epoch": 1.13,
- "learning_rate": 0.0001474903474903475,
- "loss": 1.8577,
- "step": 237
- },
- {
- "epoch": 1.14,
- "learning_rate": 0.00014710424710424712,
- "loss": 1.7452,
- "step": 238
- },
- {
- "epoch": 1.14,
- "learning_rate": 0.00014671814671814672,
- "loss": 1.9344,
- "step": 239
- },
- {
- "epoch": 1.15,
- "learning_rate": 0.00014633204633204635,
- "loss": 1.7575,
- "step": 240
- },
- {
- "epoch": 1.15,
- "learning_rate": 0.00014594594594594595,
- "loss": 1.7707,
- "step": 241
- },
- {
- "epoch": 1.16,
- "learning_rate": 0.00014555984555984558,
- "loss": 1.8945,
- "step": 242
- },
- {
- "epoch": 1.16,
- "learning_rate": 0.00014517374517374518,
- "loss": 1.8379,
- "step": 243
- },
- {
- "epoch": 1.17,
- "learning_rate": 0.0001447876447876448,
- "loss": 1.9021,
- "step": 244
- },
- {
- "epoch": 1.17,
- "learning_rate": 0.0001444015444015444,
- "loss": 1.844,
- "step": 245
- },
- {
- "epoch": 1.17,
- "learning_rate": 0.000144015444015444,
- "loss": 1.9396,
- "step": 246
- },
- {
- "epoch": 1.18,
- "learning_rate": 0.00014362934362934364,
- "loss": 2.0305,
- "step": 247
- },
- {
- "epoch": 1.18,
- "learning_rate": 0.00014324324324324324,
- "loss": 1.8985,
- "step": 248
- },
- {
- "epoch": 1.18,
- "eval_loss": 3.330674409866333,
- "eval_runtime": 7.364,
- "eval_samples_per_second": 163.091,
- "eval_steps_per_second": 54.454,
- "step": 248
- },
- {
- "epoch": 1.19,
- "learning_rate": 0.00014285714285714287,
- "loss": 1.8457,
- "step": 249
- },
- {
- "epoch": 1.19,
- "learning_rate": 0.00014247104247104247,
- "loss": 1.8213,
- "step": 250
- },
- {
- "epoch": 1.2,
- "learning_rate": 0.0001420849420849421,
- "loss": 1.7586,
- "step": 251
- },
- {
- "epoch": 1.2,
- "learning_rate": 0.0001416988416988417,
- "loss": 1.8669,
- "step": 252
- },
- {
- "epoch": 1.21,
- "learning_rate": 0.00014131274131274133,
- "loss": 1.9476,
- "step": 253
- },
- {
- "epoch": 1.21,
- "learning_rate": 0.00014092664092664093,
- "loss": 1.8525,
- "step": 254
- },
- {
- "epoch": 1.22,
- "learning_rate": 0.00014054054054054056,
- "loss": 2.0163,
- "step": 255
- },
- {
- "epoch": 1.22,
- "learning_rate": 0.00014015444015444016,
- "loss": 1.9186,
- "step": 256
- },
- {
- "epoch": 1.23,
- "learning_rate": 0.00013976833976833979,
- "loss": 1.9528,
- "step": 257
- },
- {
- "epoch": 1.23,
- "learning_rate": 0.0001393822393822394,
- "loss": 2.2483,
- "step": 258
- },
- {
- "epoch": 1.24,
- "learning_rate": 0.00013899613899613902,
- "loss": 1.8889,
- "step": 259
- },
- {
- "epoch": 1.24,
- "learning_rate": 0.00013861003861003862,
- "loss": 2.0137,
- "step": 260
- },
- {
- "epoch": 1.25,
- "learning_rate": 0.00013822393822393822,
- "loss": 1.9397,
- "step": 261
- },
- {
- "epoch": 1.25,
- "learning_rate": 0.00013783783783783785,
- "loss": 1.8241,
- "step": 262
- },
- {
- "epoch": 1.26,
- "learning_rate": 0.00013745173745173745,
- "loss": 1.9685,
- "step": 263
- },
- {
- "epoch": 1.26,
- "learning_rate": 0.00013706563706563708,
- "loss": 1.9909,
- "step": 264
1666
- },
1667
- {
1668
- "epoch": 1.27,
1669
- "learning_rate": 0.00013667953667953668,
1670
- "loss": 1.8653,
1671
- "step": 265
1672
- },
1673
- {
1674
- "epoch": 1.27,
1675
- "learning_rate": 0.0001362934362934363,
1676
- "loss": 1.9163,
1677
- "step": 266
1678
- },
1679
- {
1680
- "epoch": 1.28,
1681
- "learning_rate": 0.0001359073359073359,
1682
- "loss": 1.9765,
1683
- "step": 267
1684
- },
1685
- {
1686
- "epoch": 1.28,
1687
- "learning_rate": 0.00013552123552123554,
1688
- "loss": 1.788,
1689
- "step": 268
1690
- },
1691
- {
1692
- "epoch": 1.29,
1693
- "learning_rate": 0.00013513513513513514,
1694
- "loss": 1.8103,
1695
- "step": 269
1696
- },
1697
- {
1698
- "epoch": 1.29,
1699
- "learning_rate": 0.00013474903474903477,
1700
- "loss": 2.0086,
1701
- "step": 270
1702
- },
1703
- {
1704
- "epoch": 1.3,
1705
- "learning_rate": 0.00013436293436293437,
1706
- "loss": 1.9448,
1707
- "step": 271
1708
- },
1709
- {
1710
- "epoch": 1.3,
1711
- "learning_rate": 0.000133976833976834,
1712
- "loss": 1.8598,
1713
- "step": 272
1714
- },
1715
- {
1716
- "epoch": 1.31,
1717
- "learning_rate": 0.0001335907335907336,
1718
- "loss": 2.0792,
1719
- "step": 273
1720
- },
1721
- {
1722
- "epoch": 1.31,
1723
- "learning_rate": 0.0001332046332046332,
1724
- "loss": 1.7766,
1725
- "step": 274
1726
- },
1727
- {
1728
- "epoch": 1.32,
1729
- "learning_rate": 0.00013281853281853283,
1730
- "loss": 1.9329,
1731
- "step": 275
1732
- },
1733
- {
1734
- "epoch": 1.32,
1735
- "learning_rate": 0.00013243243243243243,
1736
- "loss": 1.9933,
1737
- "step": 276
1738
- },
1739
- {
1740
- "epoch": 1.33,
1741
- "learning_rate": 0.00013204633204633206,
1742
- "loss": 1.9371,
1743
- "step": 277
1744
- },
1745
- {
1746
- "epoch": 1.33,
1747
- "learning_rate": 0.00013166023166023166,
1748
- "loss": 1.9629,
1749
- "step": 278
1750
- },
1751
- {
1752
- "epoch": 1.33,
1753
- "learning_rate": 0.00013127413127413129,
1754
- "loss": 2.0488,
1755
- "step": 279
1756
- },
1757
- {
1758
- "epoch": 1.33,
1759
- "eval_loss": 3.333745241165161,
1760
- "eval_runtime": 7.361,
1761
- "eval_samples_per_second": 163.157,
1762
- "eval_steps_per_second": 54.476,
1763
- "step": 279
1764
- },
1765
- {
1766
- "epoch": 1.34,
1767
- "learning_rate": 0.0001308880308880309,
1768
- "loss": 2.0148,
1769
- "step": 280
1770
- },
1771
- {
1772
- "epoch": 1.34,
1773
- "learning_rate": 0.00013050193050193052,
1774
- "loss": 1.8416,
1775
- "step": 281
1776
- },
1777
- {
1778
- "epoch": 1.35,
1779
- "learning_rate": 0.00013011583011583012,
1780
- "loss": 2.1004,
1781
- "step": 282
1782
- },
1783
- {
1784
- "epoch": 1.35,
1785
- "learning_rate": 0.00012972972972972974,
1786
- "loss": 1.8308,
1787
- "step": 283
1788
- },
1789
- {
1790
- "epoch": 1.36,
1791
- "learning_rate": 0.00012934362934362935,
1792
- "loss": 1.9441,
1793
- "step": 284
1794
- },
1795
- {
1796
- "epoch": 1.36,
1797
- "learning_rate": 0.00012895752895752897,
1798
- "loss": 2.083,
1799
- "step": 285
1800
- },
1801
- {
1802
- "epoch": 1.37,
1803
- "learning_rate": 0.00012857142857142858,
1804
- "loss": 1.8198,
1805
- "step": 286
1806
- },
1807
- {
1808
- "epoch": 1.37,
1809
- "learning_rate": 0.0001281853281853282,
1810
- "loss": 2.0069,
1811
- "step": 287
1812
- },
1813
- {
1814
- "epoch": 1.38,
1815
- "learning_rate": 0.0001277992277992278,
1816
- "loss": 2.0146,
1817
- "step": 288
1818
- },
1819
- {
1820
- "epoch": 1.38,
1821
- "learning_rate": 0.0001274131274131274,
1822
- "loss": 1.8554,
1823
- "step": 289
1824
- },
1825
- {
1826
- "epoch": 1.39,
1827
- "learning_rate": 0.00012702702702702703,
1828
- "loss": 1.972,
1829
- "step": 290
1830
- },
1831
- {
1832
- "epoch": 1.39,
1833
- "learning_rate": 0.00012664092664092664,
1834
- "loss": 1.9583,
1835
- "step": 291
1836
- },
1837
- {
1838
- "epoch": 1.4,
1839
- "learning_rate": 0.00012625482625482626,
1840
- "loss": 1.8567,
1841
- "step": 292
1842
- },
1843
- {
1844
- "epoch": 1.4,
1845
- "learning_rate": 0.00012586872586872587,
1846
- "loss": 2.0031,
1847
- "step": 293
1848
- },
1849
- {
1850
- "epoch": 1.41,
1851
- "learning_rate": 0.0001254826254826255,
1852
- "loss": 1.9725,
1853
- "step": 294
1854
- },
1855
- {
1856
- "epoch": 1.41,
1857
- "learning_rate": 0.0001250965250965251,
1858
- "loss": 1.9517,
1859
- "step": 295
1860
- },
1861
- {
1862
- "epoch": 1.42,
1863
- "learning_rate": 0.00012471042471042472,
1864
- "loss": 1.7436,
1865
- "step": 296
1866
- },
1867
- {
1868
- "epoch": 1.42,
1869
- "learning_rate": 0.00012432432432432433,
1870
- "loss": 1.9968,
1871
- "step": 297
1872
- },
1873
- {
1874
- "epoch": 1.43,
1875
- "learning_rate": 0.00012393822393822395,
1876
- "loss": 1.8299,
1877
- "step": 298
1878
- },
1879
- {
1880
- "epoch": 1.43,
1881
- "learning_rate": 0.00012355212355212355,
1882
- "loss": 2.1024,
1883
- "step": 299
1884
- },
1885
- {
1886
- "epoch": 1.44,
1887
- "learning_rate": 0.00012316602316602318,
1888
- "loss": 1.8099,
1889
- "step": 300
1890
- },
1891
- {
1892
- "epoch": 1.44,
1893
- "learning_rate": 0.00012277992277992278,
1894
- "loss": 1.9761,
1895
- "step": 301
1896
- },
1897
- {
1898
- "epoch": 1.45,
1899
- "learning_rate": 0.0001223938223938224,
1900
- "loss": 2.1201,
1901
- "step": 302
1902
- },
1903
- {
1904
- "epoch": 1.45,
1905
- "learning_rate": 0.00012200772200772201,
1906
- "loss": 1.9268,
1907
- "step": 303
1908
- },
1909
- {
1910
- "epoch": 1.46,
1911
- "learning_rate": 0.00012162162162162163,
1912
- "loss": 1.8136,
1913
- "step": 304
1914
- },
1915
- {
1916
- "epoch": 1.46,
1917
- "learning_rate": 0.00012123552123552124,
1918
- "loss": 2.0362,
1919
- "step": 305
1920
- },
1921
- {
1922
- "epoch": 1.47,
1923
- "learning_rate": 0.00012084942084942086,
1924
- "loss": 2.0653,
1925
- "step": 306
1926
- },
1927
- {
1928
- "epoch": 1.47,
1929
- "learning_rate": 0.00012046332046332047,
1930
- "loss": 2.022,
1931
- "step": 307
1932
- },
1933
- {
1934
- "epoch": 1.48,
1935
- "learning_rate": 0.00012007722007722009,
1936
- "loss": 1.9317,
1937
- "step": 308
1938
- },
1939
- {
1940
- "epoch": 1.48,
1941
- "learning_rate": 0.0001196911196911197,
1942
- "loss": 2.0455,
1943
- "step": 309
1944
- },
1945
- {
1946
- "epoch": 1.49,
1947
- "learning_rate": 0.00011930501930501932,
1948
- "loss": 2.0812,
1949
- "step": 310
1950
- },
1951
- {
1952
- "epoch": 1.49,
1953
- "eval_loss": 3.341298818588257,
1954
- "eval_runtime": 7.3644,
1955
- "eval_samples_per_second": 163.081,
1956
- "eval_steps_per_second": 54.451,
1957
- "step": 310
1958
- },
1959
- {
1960
- "epoch": 1.49,
1961
- "learning_rate": 0.00011891891891891893,
1962
- "loss": 2.0609,
1963
- "step": 311
1964
- },
1965
- {
1966
- "epoch": 1.5,
1967
- "learning_rate": 0.00011853281853281855,
1968
- "loss": 1.9708,
1969
- "step": 312
1970
- },
1971
- {
1972
- "epoch": 1.5,
1973
- "learning_rate": 0.00011814671814671816,
1974
- "loss": 1.9968,
1975
- "step": 313
1976
- },
1977
- {
1978
- "epoch": 1.5,
1979
- "learning_rate": 0.00011776061776061778,
1980
- "loss": 2.0283,
1981
- "step": 314
1982
- },
1983
- {
1984
- "epoch": 1.51,
1985
- "learning_rate": 0.00011737451737451739,
1986
- "loss": 2.0142,
1987
- "step": 315
1988
- },
1989
- {
1990
- "epoch": 1.51,
1991
- "learning_rate": 0.00011698841698841701,
1992
- "loss": 2.026,
1993
- "step": 316
1994
- },
1995
- {
1996
- "epoch": 1.52,
1997
- "learning_rate": 0.0001166023166023166,
1998
- "loss": 2.1965,
1999
- "step": 317
2000
- },
2001
- {
2002
- "epoch": 1.52,
2003
- "learning_rate": 0.00011621621621621621,
2004
- "loss": 1.984,
2005
- "step": 318
2006
- },
2007
- {
2008
- "epoch": 1.53,
2009
- "learning_rate": 0.00011583011583011582,
2010
- "loss": 2.0699,
2011
- "step": 319
2012
- },
2013
- {
2014
- "epoch": 1.53,
2015
- "learning_rate": 0.00011544401544401544,
2016
- "loss": 1.864,
2017
- "step": 320
2018
- },
2019
- {
2020
- "epoch": 1.54,
2021
- "learning_rate": 0.00011505791505791505,
2022
- "loss": 2.0219,
2023
- "step": 321
2024
- },
2025
- {
2026
- "epoch": 1.54,
2027
- "learning_rate": 0.00011467181467181467,
2028
- "loss": 1.9162,
2029
- "step": 322
2030
- },
2031
- {
2032
- "epoch": 1.55,
2033
- "learning_rate": 0.00011428571428571428,
2034
- "loss": 1.9092,
2035
- "step": 323
2036
- },
2037
- {
2038
- "epoch": 1.55,
2039
- "learning_rate": 0.0001138996138996139,
2040
- "loss": 2.0932,
2041
- "step": 324
2042
- },
2043
- {
2044
- "epoch": 1.56,
2045
- "learning_rate": 0.00011351351351351351,
2046
- "loss": 2.0975,
2047
- "step": 325
2048
- },
2049
- {
2050
- "epoch": 1.56,
2051
- "learning_rate": 0.00011312741312741313,
2052
- "loss": 2.1674,
2053
- "step": 326
2054
- },
2055
- {
2056
- "epoch": 1.57,
2057
- "learning_rate": 0.00011274131274131274,
2058
- "loss": 1.8444,
2059
- "step": 327
2060
- },
2061
- {
2062
- "epoch": 1.57,
2063
- "learning_rate": 0.00011235521235521236,
2064
- "loss": 1.9696,
2065
- "step": 328
2066
- },
2067
- {
2068
- "epoch": 1.58,
2069
- "learning_rate": 0.00011196911196911197,
2070
- "loss": 1.943,
2071
- "step": 329
2072
- },
2073
- {
2074
- "epoch": 1.58,
2075
- "learning_rate": 0.00011158301158301159,
2076
- "loss": 2.1044,
2077
- "step": 330
2078
- },
2079
- {
2080
- "epoch": 1.59,
2081
- "learning_rate": 0.0001111969111969112,
2082
- "loss": 2.2068,
2083
- "step": 331
2084
- },
2085
- {
2086
- "epoch": 1.59,
2087
- "learning_rate": 0.00011081081081081082,
2088
- "loss": 2.0958,
2089
- "step": 332
2090
- },
2091
- {
2092
- "epoch": 1.6,
2093
- "learning_rate": 0.00011042471042471043,
2094
- "loss": 1.9789,
2095
- "step": 333
2096
- },
2097
- {
2098
- "epoch": 1.6,
2099
- "learning_rate": 0.00011003861003861005,
2100
- "loss": 1.8663,
2101
- "step": 334
2102
- },
2103
- {
2104
- "epoch": 1.61,
2105
- "learning_rate": 0.00010965250965250966,
2106
- "loss": 2.0499,
2107
- "step": 335
2108
- },
2109
- {
2110
- "epoch": 1.61,
2111
- "learning_rate": 0.00010926640926640928,
2112
- "loss": 1.935,
2113
- "step": 336
2114
- },
2115
- {
2116
- "epoch": 1.62,
2117
- "learning_rate": 0.00010888030888030889,
2118
- "loss": 2.0021,
2119
- "step": 337
2120
- },
2121
- {
2122
- "epoch": 1.62,
2123
- "learning_rate": 0.0001084942084942085,
2124
- "loss": 1.953,
2125
- "step": 338
2126
- },
2127
- {
2128
- "epoch": 1.63,
2129
- "learning_rate": 0.00010810810810810812,
2130
- "loss": 2.0466,
2131
- "step": 339
2132
- },
2133
- {
2134
- "epoch": 1.63,
2135
- "learning_rate": 0.00010772200772200774,
2136
- "loss": 1.9709,
2137
- "step": 340
2138
- },
2139
- {
2140
- "epoch": 1.64,
2141
- "learning_rate": 0.00010733590733590735,
2142
- "loss": 1.8884,
2143
- "step": 341
2144
- },
2145
- {
2146
- "epoch": 1.64,
2147
- "eval_loss": 3.323758602142334,
2148
- "eval_runtime": 7.3665,
2149
- "eval_samples_per_second": 163.036,
2150
- "eval_steps_per_second": 54.436,
2151
- "step": 341
2152
- },
2153
- {
2154
- "epoch": 1.64,
2155
- "learning_rate": 0.00010694980694980697,
2156
- "loss": 2.0557,
2157
- "step": 342
2158
- },
2159
- {
2160
- "epoch": 1.65,
2161
- "learning_rate": 0.00010656370656370658,
2162
- "loss": 2.0345,
2163
- "step": 343
2164
- },
2165
- {
2166
- "epoch": 1.65,
2167
- "learning_rate": 0.0001061776061776062,
2168
- "loss": 1.8173,
2169
- "step": 344
2170
- },
2171
- {
2172
- "epoch": 1.66,
2173
- "learning_rate": 0.00010579150579150581,
2174
- "loss": 1.9598,
2175
- "step": 345
2176
- },
2177
- {
2178
- "epoch": 1.66,
2179
- "learning_rate": 0.0001054054054054054,
2180
- "loss": 2.0323,
2181
- "step": 346
2182
- },
2183
- {
2184
- "epoch": 1.67,
2185
- "learning_rate": 0.00010501930501930501,
2186
- "loss": 1.9284,
2187
- "step": 347
2188
- },
2189
- {
2190
- "epoch": 1.67,
2191
- "learning_rate": 0.00010463320463320463,
2192
- "loss": 2.1235,
2193
- "step": 348
2194
- },
2195
- {
2196
- "epoch": 1.67,
2197
- "learning_rate": 0.00010424710424710424,
2198
- "loss": 1.9426,
2199
- "step": 349
2200
- },
2201
- {
2202
- "epoch": 1.68,
2203
- "learning_rate": 0.00010386100386100386,
2204
- "loss": 1.8692,
2205
- "step": 350
2206
- },
2207
- {
2208
- "epoch": 1.68,
2209
- "learning_rate": 0.00010347490347490347,
2210
- "loss": 1.9559,
2211
- "step": 351
2212
- },
2213
- {
2214
- "epoch": 1.69,
2215
- "learning_rate": 0.00010308880308880309,
2216
- "loss": 2.0407,
2217
- "step": 352
2218
- },
2219
- {
2220
- "epoch": 1.69,
2221
- "learning_rate": 0.0001027027027027027,
2222
- "loss": 1.9356,
2223
- "step": 353
2224
- },
2225
- {
2226
- "epoch": 1.7,
2227
- "learning_rate": 0.00010231660231660232,
2228
- "loss": 1.9055,
2229
- "step": 354
2230
- },
2231
- {
2232
- "epoch": 1.7,
2233
- "learning_rate": 0.00010193050193050193,
2234
- "loss": 1.9776,
2235
- "step": 355
2236
- },
2237
- {
2238
- "epoch": 1.71,
2239
- "learning_rate": 0.00010154440154440155,
2240
- "loss": 1.8996,
2241
- "step": 356
2242
- },
2243
- {
2244
- "epoch": 1.71,
2245
- "learning_rate": 0.00010115830115830116,
2246
- "loss": 1.8893,
2247
- "step": 357
2248
- },
2249
- {
2250
- "epoch": 1.72,
2251
- "learning_rate": 0.00010077220077220078,
2252
- "loss": 1.9621,
2253
- "step": 358
2254
- },
2255
- {
2256
- "epoch": 1.72,
2257
- "learning_rate": 0.00010038610038610039,
2258
- "loss": 1.9447,
2259
- "step": 359
2260
- },
2261
- {
2262
- "epoch": 1.73,
2263
- "learning_rate": 0.0001,
2264
- "loss": 1.9646,
2265
- "step": 360
2266
- },
2267
- {
2268
- "epoch": 1.73,
2269
- "learning_rate": 9.961389961389962e-05,
2270
- "loss": 1.9459,
2271
- "step": 361
2272
- },
2273
- {
2274
- "epoch": 1.74,
2275
- "learning_rate": 9.922779922779923e-05,
2276
- "loss": 2.0168,
2277
- "step": 362
2278
- },
2279
- {
2280
- "epoch": 1.74,
2281
- "learning_rate": 9.884169884169885e-05,
2282
- "loss": 2.0021,
2283
- "step": 363
2284
- },
2285
- {
2286
- "epoch": 1.75,
2287
- "learning_rate": 9.845559845559846e-05,
2288
- "loss": 1.7642,
2289
- "step": 364
2290
- },
2291
- {
2292
- "epoch": 1.75,
2293
- "learning_rate": 9.806949806949808e-05,
2294
- "loss": 2.0138,
2295
- "step": 365
2296
- },
2297
- {
2298
- "epoch": 1.76,
2299
- "learning_rate": 9.76833976833977e-05,
2300
- "loss": 2.0108,
2301
- "step": 366
2302
- },
2303
- {
2304
- "epoch": 1.76,
2305
- "learning_rate": 9.729729729729731e-05,
2306
- "loss": 2.0955,
2307
- "step": 367
2308
- },
2309
- {
2310
- "epoch": 1.77,
2311
- "learning_rate": 9.691119691119691e-05,
2312
- "loss": 2.1705,
2313
- "step": 368
2314
- },
2315
- {
2316
- "epoch": 1.77,
2317
- "learning_rate": 9.652509652509652e-05,
2318
- "loss": 1.8494,
2319
- "step": 369
2320
- },
2321
- {
2322
- "epoch": 1.78,
2323
- "learning_rate": 9.613899613899614e-05,
2324
- "loss": 1.8676,
2325
- "step": 370
2326
- },
2327
- {
2328
- "epoch": 1.78,
2329
- "learning_rate": 9.575289575289575e-05,
2330
- "loss": 1.821,
2331
- "step": 371
2332
- },
2333
- {
2334
- "epoch": 1.79,
2335
- "learning_rate": 9.536679536679537e-05,
2336
- "loss": 2.0281,
2337
- "step": 372
2338
- },
2339
- {
2340
- "epoch": 1.79,
2341
- "eval_loss": 3.2718801498413086,
2342
- "eval_runtime": 7.3652,
2343
- "eval_samples_per_second": 163.063,
2344
- "eval_steps_per_second": 54.445,
2345
- "step": 372
2346
- },
2347
- {
2348
- "epoch": 1.79,
2349
- "learning_rate": 9.498069498069498e-05,
2350
- "loss": 2.1556,
2351
- "step": 373
2352
- },
2353
- {
2354
- "epoch": 1.8,
2355
- "learning_rate": 9.45945945945946e-05,
2356
- "loss": 1.9643,
2357
- "step": 374
2358
- },
2359
- {
2360
- "epoch": 1.8,
2361
- "learning_rate": 9.420849420849421e-05,
2362
- "loss": 2.0287,
2363
- "step": 375
2364
- },
2365
- {
2366
- "epoch": 1.81,
2367
- "learning_rate": 9.382239382239383e-05,
2368
- "loss": 1.927,
2369
- "step": 376
2370
- },
2371
- {
2372
- "epoch": 1.81,
2373
- "learning_rate": 9.343629343629344e-05,
2374
- "loss": 1.9838,
2375
- "step": 377
2376
- },
2377
- {
2378
- "epoch": 1.82,
2379
- "learning_rate": 9.305019305019306e-05,
2380
- "loss": 1.9065,
2381
- "step": 378
2382
- },
2383
- {
2384
- "epoch": 1.82,
2385
- "learning_rate": 9.266409266409267e-05,
2386
- "loss": 2.056,
2387
- "step": 379
2388
- },
2389
- {
2390
- "epoch": 1.83,
2391
- "learning_rate": 9.227799227799229e-05,
2392
- "loss": 1.9277,
2393
- "step": 380
2394
- },
2395
- {
2396
- "epoch": 1.83,
2397
- "learning_rate": 9.18918918918919e-05,
2398
- "loss": 1.7797,
2399
- "step": 381
2400
- },
2401
- {
2402
- "epoch": 1.83,
2403
- "learning_rate": 9.15057915057915e-05,
2404
- "loss": 1.8818,
2405
- "step": 382
2406
- },
2407
- {
2408
- "epoch": 1.84,
2409
- "learning_rate": 9.111969111969112e-05,
2410
- "loss": 1.947,
2411
- "step": 383
2412
- },
2413
- {
2414
- "epoch": 1.84,
2415
- "learning_rate": 9.073359073359073e-05,
2416
- "loss": 1.942,
2417
- "step": 384
2418
- },
2419
- {
2420
- "epoch": 1.85,
2421
- "learning_rate": 9.034749034749035e-05,
2422
- "loss": 1.9998,
2423
- "step": 385
2424
- },
2425
- {
2426
- "epoch": 1.85,
2427
- "learning_rate": 8.996138996138996e-05,
2428
- "loss": 1.8805,
2429
- "step": 386
2430
- },
2431
- {
2432
- "epoch": 1.86,
2433
- "learning_rate": 8.957528957528958e-05,
2434
- "loss": 1.8903,
2435
- "step": 387
2436
- },
2437
- {
2438
- "epoch": 1.86,
2439
- "learning_rate": 8.918918918918919e-05,
2440
- "loss": 1.9189,
2441
- "step": 388
2442
- },
2443
- {
2444
- "epoch": 1.87,
2445
- "learning_rate": 8.880308880308881e-05,
2446
- "loss": 2.0308,
2447
- "step": 389
2448
- },
2449
- {
2450
- "epoch": 1.87,
2451
- "learning_rate": 8.841698841698842e-05,
2452
- "loss": 2.0768,
2453
- "step": 390
2454
- },
2455
- {
2456
- "epoch": 1.88,
2457
- "learning_rate": 8.803088803088804e-05,
2458
- "loss": 1.9168,
2459
- "step": 391
2460
- },
2461
- {
2462
- "epoch": 1.88,
2463
- "learning_rate": 8.764478764478765e-05,
2464
- "loss": 1.8967,
2465
- "step": 392
2466
- },
2467
- {
2468
- "epoch": 1.89,
2469
- "learning_rate": 8.725868725868727e-05,
2470
- "loss": 1.9347,
2471
- "step": 393
2472
- },
2473
- {
2474
- "epoch": 1.89,
2475
- "learning_rate": 8.687258687258688e-05,
2476
- "loss": 1.8273,
2477
- "step": 394
2478
- },
2479
- {
2480
- "epoch": 1.9,
2481
- "learning_rate": 8.64864864864865e-05,
2482
- "loss": 1.9801,
2483
- "step": 395
2484
- },
2485
- {
2486
- "epoch": 1.9,
2487
- "learning_rate": 8.61003861003861e-05,
2488
- "loss": 2.0002,
2489
- "step": 396
2490
- },
2491
- {
2492
- "epoch": 1.91,
2493
- "learning_rate": 8.571428571428571e-05,
2494
- "loss": 2.0318,
2495
- "step": 397
2496
- },
2497
- {
2498
- "epoch": 1.91,
2499
- "learning_rate": 8.532818532818533e-05,
2500
- "loss": 1.8399,
2501
- "step": 398
2502
- },
2503
- {
2504
- "epoch": 1.92,
2505
- "learning_rate": 8.494208494208494e-05,
2506
- "loss": 1.8956,
2507
- "step": 399
2508
- },
2509
- {
2510
- "epoch": 1.92,
2511
- "learning_rate": 8.455598455598456e-05,
2512
- "loss": 2.0156,
2513
- "step": 400
2514
- },
2515
- {
2516
- "epoch": 1.93,
2517
- "learning_rate": 8.416988416988417e-05,
2518
- "loss": 1.9499,
2519
- "step": 401
2520
- },
2521
- {
2522
- "epoch": 1.93,
2523
- "learning_rate": 8.378378378378379e-05,
2524
- "loss": 1.8823,
2525
- "step": 402
2526
- },
2527
- {
2528
- "epoch": 1.94,
2529
- "learning_rate": 8.33976833976834e-05,
2530
- "loss": 2.1344,
2531
- "step": 403
2532
- },
2533
- {
2534
- "epoch": 1.94,
2535
- "eval_loss": 3.2487306594848633,
2536
- "eval_runtime": 7.3645,
2537
- "eval_samples_per_second": 163.08,
2538
- "eval_steps_per_second": 54.451,
2539
- "step": 403
2540
- },
2541
- {
2542
- "epoch": 1.94,
2543
- "learning_rate": 8.301158301158302e-05,
2544
- "loss": 1.9887,
2545
- "step": 404
2546
- },
2547
- {
2548
- "epoch": 1.95,
2549
- "learning_rate": 8.262548262548263e-05,
2550
- "loss": 2.0445,
2551
- "step": 405
2552
- },
2553
- {
2554
- "epoch": 1.95,
2555
- "learning_rate": 8.223938223938225e-05,
2556
- "loss": 1.8847,
2557
- "step": 406
2558
- },
2559
- {
2560
- "epoch": 1.96,
2561
- "learning_rate": 8.185328185328186e-05,
2562
- "loss": 1.8461,
2563
- "step": 407
2564
- },
2565
- {
2566
- "epoch": 1.96,
2567
- "learning_rate": 8.146718146718148e-05,
2568
- "loss": 1.9106,
2569
- "step": 408
2570
- },
2571
- {
2572
- "epoch": 1.97,
2573
- "learning_rate": 8.108108108108109e-05,
2574
- "loss": 2.0067,
2575
- "step": 409
2576
- },
2577
- {
2578
- "epoch": 1.97,
2579
- "learning_rate": 8.06949806949807e-05,
2580
- "loss": 1.9705,
2581
- "step": 410
2582
- },
2583
- {
2584
- "epoch": 1.98,
2585
- "learning_rate": 8.03088803088803e-05,
2586
- "loss": 1.8092,
2587
- "step": 411
2588
- },
2589
- {
2590
- "epoch": 1.98,
2591
- "learning_rate": 7.992277992277992e-05,
2592
- "loss": 1.8563,
2593
- "step": 412
2594
- },
2595
- {
2596
- "epoch": 1.99,
2597
- "learning_rate": 7.953667953667954e-05,
2598
- "loss": 1.8833,
2599
- "step": 413
2600
- },
2601
- {
2602
- "epoch": 1.99,
2603
- "learning_rate": 7.915057915057915e-05,
2604
- "loss": 1.9905,
2605
- "step": 414
2606
- },
2607
- {
2608
- "epoch": 2.0,
2609
- "learning_rate": 7.876447876447877e-05,
2610
- "loss": 2.0448,
2611
- "step": 415
2612
- },
2613
- {
2614
- "epoch": 2.0,
2615
- "learning_rate": 7.837837837837838e-05,
2616
- "loss": 1.9066,
2617
- "step": 416
2618
- },
2619
- {
2620
- "epoch": 2.0,
2621
- "learning_rate": 7.7992277992278e-05,
2622
- "loss": 1.8585,
2623
- "step": 417
2624
- },
2625
- {
2626
- "epoch": 2.01,
2627
- "learning_rate": 7.760617760617761e-05,
2628
- "loss": 2.0163,
2629
- "step": 418
2630
- },
2631
- {
2632
- "epoch": 2.01,
2633
- "learning_rate": 7.722007722007723e-05,
2634
- "loss": 1.8571,
2635
- "step": 419
2636
- },
2637
- {
2638
- "epoch": 2.02,
2639
- "learning_rate": 7.683397683397684e-05,
2640
- "loss": 2.0083,
2641
- "step": 420
2642
- },
2643
- {
2644
- "epoch": 2.0,
2645
- "learning_rate": 7.644787644787645e-05,
2646
- "loss": 0.6158,
2647
- "step": 421
2648
- },
2649
- {
2650
- "epoch": 2.01,
2651
- "learning_rate": 7.606177606177607e-05,
2652
- "loss": 0.7386,
2653
- "step": 422
2654
- },
2655
- {
2656
- "epoch": 2.01,
2657
- "learning_rate": 7.567567567567568e-05,
2658
- "loss": 0.7067,
2659
- "step": 423
2660
- },
2661
- {
2662
- "epoch": 2.02,
2663
- "learning_rate": 7.52895752895753e-05,
2664
- "loss": 0.6173,
2665
- "step": 424
2666
- },
2667
- {
2668
- "epoch": 2.02,
2669
- "learning_rate": 7.49034749034749e-05,
2670
- "loss": 0.5876,
2671
- "step": 425
2672
- },
2673
- {
2674
- "epoch": 2.03,
2675
- "learning_rate": 7.451737451737452e-05,
2676
- "loss": 0.5948,
2677
- "step": 426
2678
- },
2679
- {
2680
- "epoch": 2.03,
2681
- "learning_rate": 7.413127413127413e-05,
2682
- "loss": 0.5593,
2683
- "step": 427
2684
- },
2685
- {
2686
- "epoch": 2.04,
2687
- "learning_rate": 7.374517374517374e-05,
2688
- "loss": 0.5989,
2689
- "step": 428
2690
- },
2691
- {
2692
- "epoch": 2.04,
2693
- "learning_rate": 7.335907335907336e-05,
2694
- "loss": 0.5699,
2695
- "step": 429
2696
- },
2697
- {
2698
- "epoch": 2.05,
2699
- "learning_rate": 7.297297297297297e-05,
2700
- "loss": 0.5719,
2701
- "step": 430
2702
- },
2703
- {
2704
- "epoch": 2.05,
2705
- "learning_rate": 7.258687258687259e-05,
2706
- "loss": 0.4928,
2707
- "step": 431
2708
- },
2709
- {
2710
- "epoch": 2.06,
2711
- "learning_rate": 7.22007722007722e-05,
2712
- "loss": 0.4713,
2713
- "step": 432
2714
- },
2715
- {
2716
- "epoch": 2.06,
2717
- "learning_rate": 7.181467181467182e-05,
2718
- "loss": 0.6161,
2719
- "step": 433
2720
- },
2721
- {
2722
- "epoch": 2.07,
2723
- "learning_rate": 7.142857142857143e-05,
2724
- "loss": 0.566,
2725
- "step": 434
2726
- },
2727
- {
2728
- "epoch": 2.07,
2729
- "eval_loss": 4.280820369720459,
2730
- "eval_runtime": 7.3682,
2731
- "eval_samples_per_second": 162.998,
2732
- "eval_steps_per_second": 54.423,
2733
- "step": 434
2734
- },
2735
- {
2736
- "epoch": 2.07,
2737
- "learning_rate": 7.104247104247105e-05,
2738
- "loss": 0.5182,
2739
- "step": 435
2740
- },
2741
- {
2742
- "epoch": 2.08,
2743
- "learning_rate": 7.065637065637066e-05,
2744
- "loss": 0.6347,
2745
- "step": 436
2746
- },
2747
- {
2748
- "epoch": 2.08,
2749
- "learning_rate": 7.027027027027028e-05,
2750
- "loss": 0.6002,
2751
- "step": 437
2752
- },
2753
- {
2754
- "epoch": 2.09,
2755
- "learning_rate": 6.988416988416989e-05,
2756
- "loss": 0.5696,
2757
- "step": 438
2758
- },
2759
- {
2760
- "epoch": 2.09,
2761
- "learning_rate": 6.949806949806951e-05,
2762
- "loss": 0.5535,
2763
- "step": 439
2764
- },
2765
- {
2766
- "epoch": 2.1,
2767
- "learning_rate": 6.911196911196911e-05,
2768
- "loss": 0.5263,
2769
- "step": 440
2770
- },
2771
- {
2772
- "epoch": 2.1,
2773
- "learning_rate": 6.872586872586872e-05,
2774
- "loss": 0.5342,
2775
- "step": 441
2776
- },
2777
- {
2778
- "epoch": 2.11,
2779
- "learning_rate": 6.833976833976834e-05,
2780
- "loss": 0.4946,
2781
- "step": 442
2782
- },
2783
- {
2784
- "epoch": 2.11,
2785
- "learning_rate": 6.795366795366795e-05,
2786
- "loss": 0.5402,
2787
- "step": 443
2788
- },
2789
- {
2790
- "epoch": 2.12,
2791
- "learning_rate": 6.756756756756757e-05,
2792
- "loss": 0.5005,
2793
- "step": 444
2794
- },
2795
- {
2796
- "epoch": 2.12,
2797
- "learning_rate": 6.718146718146718e-05,
2798
- "loss": 0.6038,
2799
- "step": 445
2800
- },
2801
- {
2802
- "epoch": 2.13,
2803
- "learning_rate": 6.67953667953668e-05,
2804
- "loss": 0.5123,
2805
- "step": 446
2806
- },
2807
- {
2808
- "epoch": 2.13,
2809
- "learning_rate": 6.640926640926641e-05,
2810
- "loss": 0.558,
2811
- "step": 447
2812
- },
2813
- {
2814
- "epoch": 2.14,
2815
- "learning_rate": 6.602316602316603e-05,
2816
- "loss": 0.4858,
2817
- "step": 448
2818
- },
2819
- {
2820
- "epoch": 2.14,
2821
- "learning_rate": 6.563706563706564e-05,
2822
- "loss": 0.6183,
2823
- "step": 449
2824
- },
2825
- {
2826
- "epoch": 2.15,
2827
- "learning_rate": 6.525096525096526e-05,
2828
- "loss": 0.5093,
2829
- "step": 450
2830
- },
2831
- {
2832
- "epoch": 2.15,
2833
- "learning_rate": 6.486486486486487e-05,
2834
- "loss": 0.4336,
2835
- "step": 451
2836
- },
2837
- {
2838
- "epoch": 2.16,
2839
- "learning_rate": 6.447876447876449e-05,
2840
- "loss": 0.653,
2841
- "step": 452
2842
- },
2843
- {
2844
- "epoch": 2.16,
2845
- "learning_rate": 6.40926640926641e-05,
2846
- "loss": 0.5675,
2847
- "step": 453
2848
- },
2849
- {
2850
- "epoch": 2.17,
2851
- "learning_rate": 6.37065637065637e-05,
2852
- "loss": 0.5146,
2853
- "step": 454
2854
- },
2855
- {
2856
- "epoch": 2.17,
2857
- "learning_rate": 6.332046332046332e-05,
2858
- "loss": 0.4988,
2859
- "step": 455
2860
- },
2861
- {
2862
- "epoch": 2.17,
2863
- "learning_rate": 6.293436293436293e-05,
2864
- "loss": 0.5216,
2865
- "step": 456
2866
- },
2867
- {
2868
- "epoch": 2.18,
2869
- "learning_rate": 6.254826254826255e-05,
2870
- "loss": 0.5886,
2871
- "step": 457
2872
- },
2873
- {
2874
- "epoch": 2.18,
2875
- "learning_rate": 6.216216216216216e-05,
2876
- "loss": 0.5856,
2877
- "step": 458
2878
- },
2879
- {
2880
- "epoch": 2.19,
2881
- "learning_rate": 6.177606177606178e-05,
2882
- "loss": 0.4837,
2883
- "step": 459
2884
- },
2885
- {
2886
- "epoch": 2.19,
2887
- "learning_rate": 6.138996138996139e-05,
2888
- "loss": 0.6044,
2889
- "step": 460
2890
- },
2891
- {
2892
- "epoch": 2.2,
2893
- "learning_rate": 6.100386100386101e-05,
2894
- "loss": 0.5276,
2895
- "step": 461
2896
- },
2897
- {
2898
- "epoch": 2.2,
2899
- "learning_rate": 6.061776061776062e-05,
2900
- "loss": 0.4752,
2901
- "step": 462
2902
- },
2903
- {
2904
- "epoch": 2.21,
2905
- "learning_rate": 6.023166023166024e-05,
2906
- "loss": 0.5702,
2907
- "step": 463
2908
- },
2909
- {
2910
- "epoch": 2.21,
2911
- "learning_rate": 5.984555984555985e-05,
2912
- "loss": 0.4758,
2913
- "step": 464
2914
- },
2915
- {
2916
- "epoch": 2.22,
2917
- "learning_rate": 5.9459459459459466e-05,
2918
- "loss": 0.573,
2919
- "step": 465
2920
- },
2921
- {
2922
- "epoch": 2.22,
2923
- "eval_loss": 4.131713390350342,
2924
- "eval_runtime": 7.3741,
2925
- "eval_samples_per_second": 162.866,
2926
- "eval_steps_per_second": 54.379,
2927
- "step": 465
2928
- },
2929
- {
2930
- "epoch": 2.22,
2931
- "learning_rate": 5.907335907335908e-05,
2932
- "loss": 0.5373,
2933
- "step": 466
2934
- },
2935
- {
2936
- "epoch": 2.23,
2937
- "learning_rate": 5.8687258687258696e-05,
2938
- "loss": 0.5611,
2939
- "step": 467
2940
- },
2941
- {
2942
- "epoch": 2.23,
2943
- "learning_rate": 5.83011583011583e-05,
2944
- "loss": 0.5744,
2945
- "step": 468
2946
- },
2947
- {
2948
- "epoch": 2.24,
2949
- "learning_rate": 5.791505791505791e-05,
2950
- "loss": 0.4818,
2951
- "step": 469
2952
- },
2953
- {
2954
- "epoch": 2.24,
2955
- "learning_rate": 5.752895752895753e-05,
2956
- "loss": 0.4519,
2957
- "step": 470
2958
- },
2959
- {
2960
- "epoch": 2.25,
2961
- "learning_rate": 5.714285714285714e-05,
2962
- "loss": 0.4295,
2963
- "step": 471
2964
- },
2965
- {
2966
- "epoch": 2.25,
2967
- "learning_rate": 5.6756756756756757e-05,
2968
- "loss": 0.4755,
2969
- "step": 472
2970
- },
2971
- {
2972
- "epoch": 2.26,
2973
- "learning_rate": 5.637065637065637e-05,
2974
- "loss": 0.501,
2975
- "step": 473
2976
- },
2977
- {
2978
- "epoch": 2.26,
2979
- "learning_rate": 5.5984555984555986e-05,
2980
- "loss": 0.449,
2981
- "step": 474
2982
- },
2983
- {
2984
- "epoch": 2.27,
2985
- "learning_rate": 5.55984555984556e-05,
2986
- "loss": 0.4914,
2987
- "step": 475
2988
- },
2989
- {
2990
- "epoch": 2.27,
2991
- "learning_rate": 5.5212355212355216e-05,
2992
- "loss": 0.5153,
2993
- "step": 476
2994
- },
2995
- {
2996
- "epoch": 2.28,
2997
- "learning_rate": 5.482625482625483e-05,
2998
- "loss": 0.5433,
2999
- "step": 477
3000
- },
3001
- {
3002
- "epoch": 2.28,
3003
- "learning_rate": 5.4440154440154445e-05,
3004
- "loss": 0.5248,
3005
- "step": 478
3006
- },
3007
- {
3008
- "epoch": 2.29,
3009
- "learning_rate": 5.405405405405406e-05,
3010
- "loss": 0.5453,
3011
- "step": 479
3012
- },
3013
- {
3014
- "epoch": 2.29,
3015
- "learning_rate": 5.3667953667953675e-05,
3016
- "loss": 0.5288,
3017
- "step": 480
3018
- },
3019
- {
3020
- "epoch": 2.3,
3021
- "learning_rate": 5.328185328185329e-05,
3022
- "loss": 0.532,
3023
- "step": 481
3024
- },
3025
- {
3026
- "epoch": 2.3,
3027
- "learning_rate": 5.2895752895752905e-05,
3028
- "loss": 0.5139,
3029
- "step": 482
3030
- },
3031
- {
3032
- "epoch": 2.31,
3033
- "learning_rate": 5.2509652509652506e-05,
3034
- "loss": 0.5175,
3035
- "step": 483
3036
- },
3037
- {
3038
- "epoch": 2.31,
3039
- "learning_rate": 5.212355212355212e-05,
3040
- "loss": 0.6227,
3041
- "step": 484
3042
- },
3043
- {
3044
- "epoch": 2.32,
3045
- "learning_rate": 5.1737451737451736e-05,
3046
- "loss": 0.567,
3047
- "step": 485
3048
- },
3049
- {
3050
- "epoch": 2.32,
3051
- "learning_rate": 5.135135135135135e-05,
3052
- "loss": 0.5636,
3053
- "step": 486
3054
- },
3055
- {
3056
- "epoch": 2.33,
3057
- "learning_rate": 5.0965250965250965e-05,
3058
- "loss": 0.5367,
3059
- "step": 487
3060
- },
3061
- {
3062
- "epoch": 2.33,
3063
- "learning_rate": 5.057915057915058e-05,
3064
- "loss": 0.6016,
3065
- "step": 488
3066
- },
3067
- {
3068
- "epoch": 2.33,
3069
- "learning_rate": 5.0193050193050195e-05,
3070
- "loss": 0.4492,
3071
- "step": 489
3072
- },
3073
- {
3074
- "epoch": 2.34,
3075
- "learning_rate": 4.980694980694981e-05,
3076
- "loss": 0.5329,
3077
- "step": 490
3078
- },
3079
- {
3080
- "epoch": 2.34,
3081
- "learning_rate": 4.9420849420849425e-05,
3082
- "loss": 0.503,
3083
- "step": 491
3084
- },
3085
- {
3086
- "epoch": 2.35,
3087
- "learning_rate": 4.903474903474904e-05,
3088
- "loss": 0.4799,
3089
- "step": 492
3090
- },
3091
- {
3092
- "epoch": 2.35,
3093
- "learning_rate": 4.8648648648648654e-05,
3094
- "loss": 0.454,
3095
- "step": 493
3096
- },
3097
- {
3098
- "epoch": 2.36,
3099
- "learning_rate": 4.826254826254826e-05,
3100
- "loss": 0.5555,
3101
- "step": 494
3102
- },
3103
- {
3104
- "epoch": 2.36,
3105
- "learning_rate": 4.787644787644788e-05,
3106
- "loss": 0.5925,
3107
- "step": 495
3108
- },
3109
- {
3110
- "epoch": 2.37,
3111
- "learning_rate": 4.749034749034749e-05,
3112
- "loss": 0.5557,
3113
- "step": 496
3114
- },
3115
- {
3116
- "epoch": 2.37,
3117
- "eval_loss": 4.199349403381348,
3118
- "eval_runtime": 7.3742,
3119
- "eval_samples_per_second": 162.866,
3120
- "eval_steps_per_second": 54.379,
3121
- "step": 496
3122
- },
3123
- {
3124
- "epoch": 2.37,
3125
- "learning_rate": 4.710424710424711e-05,
3126
- "loss": 0.5157,
3127
- "step": 497
3128
- },
3129
- {
3130
- "epoch": 2.38,
3131
- "learning_rate": 4.671814671814672e-05,
3132
- "loss": 0.5538,
3133
- "step": 498
3134
- },
3135
- {
3136
- "epoch": 2.38,
3137
- "learning_rate": 4.6332046332046336e-05,
3138
- "loss": 0.6174,
3139
- "step": 499
3140
- },
3141
- {
3142
- "epoch": 2.39,
3143
- "learning_rate": 4.594594594594595e-05,
3144
- "loss": 0.4592,
3145
- "step": 500
3146
- },
3147
- {
3148
- "epoch": 2.39,
3149
- "learning_rate": 4.555984555984556e-05,
3150
- "loss": 0.4557,
3151
- "step": 501
3152
- },
3153
- {
3154
- "epoch": 2.4,
3155
- "learning_rate": 4.5173745173745174e-05,
3156
- "loss": 0.5154,
3157
- "step": 502
3158
- },
3159
- {
3160
- "epoch": 2.4,
3161
- "learning_rate": 4.478764478764479e-05,
3162
- "loss": 0.4909,
3163
- "step": 503
3164
- },
3165
- {
3166
- "epoch": 2.41,
3167
- "learning_rate": 4.4401544401544404e-05,
3168
- "loss": 0.4755,
3169
- "step": 504
3170
- },
3171
- {
3172
- "epoch": 2.41,
3173
- "learning_rate": 4.401544401544402e-05,
3174
- "loss": 0.592,
3175
- "step": 505
3176
- },
3177
- {
3178
- "epoch": 2.42,
3179
- "learning_rate": 4.3629343629343633e-05,
3180
- "loss": 0.5014,
3181
- "step": 506
3182
- },
3183
- {
3184
- "epoch": 2.42,
3185
- "learning_rate": 4.324324324324325e-05,
3186
- "loss": 0.4928,
3187
- "step": 507
3188
- },
3189
- {
3190
- "epoch": 2.43,
3191
- "learning_rate": 4.2857142857142856e-05,
3192
- "loss": 0.5352,
3193
- "step": 508
3194
- },
3195
- {
3196
- "epoch": 2.43,
3197
- "learning_rate": 4.247104247104247e-05,
3198
- "loss": 0.5457,
3199
- "step": 509
3200
- },
3201
- {
3202
- "epoch": 2.44,
3203
- "learning_rate": 4.2084942084942086e-05,
3204
- "loss": 0.5182,
3205
- "step": 510
3206
- },
3207
- {
3208
- "epoch": 2.44,
3209
- "learning_rate": 4.16988416988417e-05,
3210
- "loss": 0.527,
3211
- "step": 511
3212
- },
3213
- {
3214
- "epoch": 2.45,
3215
- "learning_rate": 4.1312741312741316e-05,
3216
- "loss": 0.4961,
3217
- "step": 512
3218
- },
3219
- {
3220
- "epoch": 2.45,
3221
- "learning_rate": 4.092664092664093e-05,
3222
- "loss": 0.4988,
3223
- "step": 513
3224
- },
3225
- {
3226
- "epoch": 2.46,
3227
- "learning_rate": 4.0540540540540545e-05,
3228
- "loss": 0.5314,
3229
- "step": 514
3230
- },
3231
- {
3232
- "epoch": 2.46,
3233
- "learning_rate": 4.015444015444015e-05,
3234
- "loss": 0.5523,
3235
- "step": 515
3236
- },
3237
- {
3238
- "epoch": 2.47,
3239
- "learning_rate": 3.976833976833977e-05,
3240
- "loss": 0.4368,
3241
- "step": 516
3242
- },
3243
- {
3244
- "epoch": 2.47,
3245
- "learning_rate": 3.938223938223938e-05,
3246
- "loss": 0.5184,
3247
- "step": 517
3248
- },
3249
- {
3250
- "epoch": 2.48,
3251
- "learning_rate": 3.8996138996139e-05,
3252
- "loss": 0.6171,
3253
- "step": 518
3254
- },
3255
- {
3256
- "epoch": 2.48,
3257
- "learning_rate": 3.861003861003861e-05,
3258
- "loss": 0.5357,
3259
- "step": 519
3260
- },
3261
- {
3262
- "epoch": 2.49,
3263
- "learning_rate": 3.822393822393823e-05,
3264
- "loss": 0.5664,
3265
- "step": 520
3266
- },
3267
- {
3268
- "epoch": 2.49,
3269
- "learning_rate": 3.783783783783784e-05,
3270
- "loss": 0.4805,
3271
- "step": 521
3272
- },
3273
- {
3274
- "epoch": 2.5,
3275
- "learning_rate": 3.745173745173745e-05,
3276
- "loss": 0.4562,
3277
- "step": 522
3278
- },
3279
- {
3280
- "epoch": 2.5,
3281
- "learning_rate": 3.7065637065637065e-05,
3282
- "loss": 0.5238,
3283
- "step": 523
3284
- },
3285
- {
3286
- "epoch": 2.5,
3287
- "learning_rate": 3.667953667953668e-05,
3288
- "loss": 0.4338,
3289
- "step": 524
3290
- },
3291
- {
3292
- "epoch": 2.51,
3293
- "learning_rate": 3.6293436293436295e-05,
3294
- "loss": 0.5656,
3295
- "step": 525
3296
- },
3297
- {
3298
- "epoch": 2.51,
3299
- "learning_rate": 3.590733590733591e-05,
3300
- "loss": 0.4496,
3301
- "step": 526
3302
- },
3303
- {
3304
- "epoch": 2.52,
3305
- "learning_rate": 3.5521235521235524e-05,
3306
- "loss": 0.4997,
3307
- "step": 527
3308
- },
3309
- {
3310
- "epoch": 2.52,
3311
- "eval_loss": 4.19807767868042,
3312
- "eval_runtime": 7.3698,
3313
- "eval_samples_per_second": 162.962,
3314
- "eval_steps_per_second": 54.411,
3315
- "step": 527
3316
- },
3317
- {
3318
- "epoch": 2.52,
3319
- "learning_rate": 3.513513513513514e-05,
3320
- "loss": 0.4531,
3321
- "step": 528
3322
- },
3323
- {
3324
- "epoch": 2.53,
3325
- "learning_rate": 3.4749034749034754e-05,
3326
- "loss": 0.5048,
3327
- "step": 529
3328
- },
3329
- {
3330
- "epoch": 2.53,
3331
- "learning_rate": 3.436293436293436e-05,
3332
- "loss": 0.5195,
3333
- "step": 530
3334
- },
3335
- {
3336
- "epoch": 2.54,
3337
- "learning_rate": 3.397683397683398e-05,
3338
- "loss": 0.4885,
3339
- "step": 531
3340
- },
3341
- {
3342
- "epoch": 2.54,
3343
- "learning_rate": 3.359073359073359e-05,
3344
- "loss": 0.6774,
3345
- "step": 532
3346
- },
3347
- {
3348
- "epoch": 2.55,
3349
- "learning_rate": 3.3204633204633207e-05,
3350
- "loss": 0.4755,
3351
- "step": 533
3352
- },
3353
- {
3354
- "epoch": 2.55,
3355
- "learning_rate": 3.281853281853282e-05,
3356
- "loss": 0.5164,
3357
- "step": 534
3358
- },
3359
- {
3360
- "epoch": 2.56,
3361
- "learning_rate": 3.2432432432432436e-05,
3362
- "loss": 0.4748,
3363
- "step": 535
3364
- },
3365
- {
3366
- "epoch": 2.56,
3367
- "learning_rate": 3.204633204633205e-05,
3368
- "loss": 0.5656,
3369
- "step": 536
3370
- },
3371
- {
3372
- "epoch": 2.57,
3373
- "learning_rate": 3.166023166023166e-05,
3374
- "loss": 0.5167,
3375
- "step": 537
3376
- },
3377
- {
3378
- "epoch": 2.57,
3379
- "learning_rate": 3.1274131274131274e-05,
3380
- "loss": 0.5101,
3381
- "step": 538
3382
- },
3383
- {
3384
- "epoch": 2.58,
3385
- "learning_rate": 3.088803088803089e-05,
3386
- "loss": 0.4965,
3387
- "step": 539
3388
- },
3389
- {
3390
- "epoch": 2.58,
3391
- "learning_rate": 3.0501930501930504e-05,
3392
- "loss": 0.5549,
3393
- "step": 540
3394
- },
3395
- {
3396
- "epoch": 2.59,
3397
- "learning_rate": 3.011583011583012e-05,
3398
- "loss": 0.4873,
3399
- "step": 541
3400
- },
3401
- {
3402
- "epoch": 2.59,
3403
- "learning_rate": 2.9729729729729733e-05,
3404
- "loss": 0.5093,
3405
- "step": 542
3406
- },
3407
- {
3408
- "epoch": 2.6,
3409
- "learning_rate": 2.9343629343629348e-05,
3410
- "loss": 0.4897,
3411
- "step": 543
3412
- },
3413
- {
3414
- "epoch": 2.6,
3415
- "learning_rate": 2.8957528957528956e-05,
3416
- "loss": 0.5128,
3417
- "step": 544
3418
- },
3419
- {
3420
- "epoch": 2.61,
3421
- "learning_rate": 2.857142857142857e-05,
3422
- "loss": 0.4829,
3423
- "step": 545
3424
- },
3425
- {
3426
- "epoch": 2.61,
3427
- "learning_rate": 2.8185328185328186e-05,
3428
- "loss": 0.4853,
3429
- "step": 546
3430
- },
3431
- {
3432
- "epoch": 2.62,
3433
- "learning_rate": 2.77992277992278e-05,
3434
- "loss": 0.5499,
3435
- "step": 547
3436
- },
3437
- {
3438
- "epoch": 2.62,
3439
- "learning_rate": 2.7413127413127415e-05,
3440
- "loss": 0.59,
3441
- "step": 548
3442
- },
3443
- {
3444
- "epoch": 2.63,
3445
- "learning_rate": 2.702702702702703e-05,
3446
- "loss": 0.5169,
3447
- "step": 549
3448
- },
3449
- {
3450
- "epoch": 2.63,
3451
- "learning_rate": 2.6640926640926645e-05,
3452
- "loss": 0.5642,
3453
- "step": 550
3454
- },
3455
- {
3456
- "epoch": 2.64,
3457
- "learning_rate": 2.6254826254826253e-05,
3458
- "loss": 0.5745,
3459
- "step": 551
3460
- },
3461
- {
3462
- "epoch": 2.64,
3463
- "learning_rate": 2.5868725868725868e-05,
3464
- "loss": 0.4845,
3465
- "step": 552
3466
- },
3467
- {
3468
- "epoch": 2.65,
3469
- "learning_rate": 2.5482625482625483e-05,
3470
- "loss": 0.5198,
3471
- "step": 553
3472
- },
3473
- {
3474
- "epoch": 2.65,
3475
- "learning_rate": 2.5096525096525097e-05,
3476
- "loss": 0.5402,
3477
- "step": 554
3478
- },
3479
- {
3480
- "epoch": 2.66,
3481
- "learning_rate": 2.4710424710424712e-05,
3482
- "loss": 0.5122,
3483
- "step": 555
3484
- },
3485
- {
3486
- "epoch": 2.66,
3487
- "learning_rate": 2.4324324324324327e-05,
3488
- "loss": 0.5769,
3489
- "step": 556
3490
- },
3491
- {
3492
- "epoch": 2.67,
3493
- "learning_rate": 2.393822393822394e-05,
3494
- "loss": 0.5519,
3495
- "step": 557
3496
- },
3497
- {
3498
- "epoch": 2.67,
3499
- "learning_rate": 2.3552123552123553e-05,
3500
- "loss": 0.465,
3501
- "step": 558
3502
- },
3503
- {
3504
- "epoch": 2.67,
3505
- "eval_loss": 4.177217483520508,
3506
- "eval_runtime": 7.3742,
3507
- "eval_samples_per_second": 162.865,
3508
- "eval_steps_per_second": 54.379,
3509
- "step": 558
3510
- },
3511
- {
3512
- "epoch": 2.67,
3513
- "learning_rate": 2.3166023166023168e-05,
3514
- "loss": 0.4816,
3515
- "step": 559
3516
- },
3517
- {
3518
- "epoch": 2.68,
3519
- "learning_rate": 2.277992277992278e-05,
3520
- "loss": 0.4428,
3521
- "step": 560
3522
- },
3523
- {
3524
- "epoch": 2.68,
3525
- "learning_rate": 2.2393822393822394e-05,
3526
- "loss": 0.4969,
3527
- "step": 561
3528
- },
3529
- {
3530
- "epoch": 2.69,
3531
- "learning_rate": 2.200772200772201e-05,
3532
- "loss": 0.4891,
3533
- "step": 562
3534
- },
3535
- {
3536
- "epoch": 2.69,
3537
- "learning_rate": 2.1621621621621624e-05,
3538
- "loss": 0.4082,
3539
- "step": 563
3540
- },
3541
- {
3542
- "epoch": 2.7,
3543
- "learning_rate": 2.1235521235521236e-05,
3544
- "loss": 0.4735,
3545
- "step": 564
3546
- },
3547
- {
3548
- "epoch": 2.7,
3549
- "learning_rate": 2.084942084942085e-05,
3550
- "loss": 0.5121,
3551
- "step": 565
3552
- },
3553
- {
3554
- "epoch": 2.71,
3555
- "learning_rate": 2.0463320463320465e-05,
3556
- "loss": 0.4696,
3557
- "step": 566
3558
- },
3559
- {
3560
- "epoch": 2.71,
3561
- "learning_rate": 2.0077220077220077e-05,
3562
- "loss": 0.397,
3563
- "step": 567
3564
- },
3565
- {
3566
- "epoch": 2.72,
3567
- "learning_rate": 1.969111969111969e-05,
3568
- "loss": 0.5271,
3569
- "step": 568
3570
- },
3571
- {
3572
- "epoch": 2.72,
3573
- "learning_rate": 1.9305019305019306e-05,
3574
- "loss": 0.4974,
3575
- "step": 569
3576
- },
3577
- {
3578
- "epoch": 2.73,
3579
- "learning_rate": 1.891891891891892e-05,
3580
- "loss": 0.4814,
3581
- "step": 570
3582
- },
3583
- {
3584
- "epoch": 2.73,
3585
- "learning_rate": 1.8532818532818533e-05,
3586
- "loss": 0.5565,
3587
- "step": 571
3588
- },
3589
- {
3590
- "epoch": 2.74,
3591
- "learning_rate": 1.8146718146718147e-05,
3592
- "loss": 0.4737,
3593
- "step": 572
3594
- },
3595
- {
3596
- "epoch": 2.74,
3597
- "learning_rate": 1.7760617760617762e-05,
3598
- "loss": 0.4448,
3599
- "step": 573
3600
- },
3601
- {
3602
- "epoch": 2.75,
3603
- "learning_rate": 1.7374517374517377e-05,
3604
- "loss": 0.4886,
3605
- "step": 574
3606
- },
3607
- {
3608
- "epoch": 2.75,
3609
- "learning_rate": 1.698841698841699e-05,
3610
- "loss": 0.5197,
3611
- "step": 575
3612
- },
3613
- {
3614
- "epoch": 2.76,
3615
- "learning_rate": 1.6602316602316603e-05,
3616
- "loss": 0.4688,
3617
- "step": 576
3618
- },
3619
- {
3620
- "epoch": 2.76,
3621
- "learning_rate": 1.6216216216216218e-05,
3622
- "loss": 0.5649,
3623
- "step": 577
3624
- },
3625
- {
3626
- "epoch": 2.77,
3627
- "learning_rate": 1.583011583011583e-05,
3628
- "loss": 0.5026,
3629
- "step": 578
3630
- },
3631
- {
3632
- "epoch": 2.77,
3633
- "learning_rate": 1.5444015444015444e-05,
3634
- "loss": 0.5832,
3635
- "step": 579
3636
- },
3637
- {
3638
- "epoch": 2.78,
3639
- "learning_rate": 1.505791505791506e-05,
3640
- "loss": 0.5995,
3641
- "step": 580
3642
- },
3643
- {
3644
- "epoch": 2.78,
3645
- "learning_rate": 1.4671814671814674e-05,
3646
- "loss": 0.5342,
3647
- "step": 581
3648
- },
3649
- {
3650
- "epoch": 2.79,
3651
- "learning_rate": 1.4285714285714285e-05,
3652
- "loss": 0.5465,
3653
- "step": 582
3654
- },
3655
- {
3656
- "epoch": 2.79,
3657
- "learning_rate": 1.38996138996139e-05,
3658
- "loss": 0.5165,
3659
- "step": 583
3660
- },
3661
- {
3662
- "epoch": 2.8,
3663
- "learning_rate": 1.3513513513513515e-05,
3664
- "loss": 0.4594,
3665
- "step": 584
3666
- },
3667
- {
3668
- "epoch": 2.8,
3669
- "learning_rate": 1.3127413127413127e-05,
3670
- "loss": 0.4448,
3671
- "step": 585
3672
- },
3673
- {
3674
- "epoch": 2.81,
3675
- "learning_rate": 1.2741312741312741e-05,
3676
- "loss": 0.5148,
3677
- "step": 586
3678
- },
3679
- {
3680
- "epoch": 2.81,
3681
- "learning_rate": 1.2355212355212356e-05,
3682
- "loss": 0.5255,
3683
- "step": 587
3684
- },
3685
- {
3686
- "epoch": 2.82,
3687
- "learning_rate": 1.196911196911197e-05,
3688
- "loss": 0.4979,
3689
- "step": 588
3690
- },
3691
- {
3692
- "epoch": 2.82,
3693
- "learning_rate": 1.1583011583011584e-05,
3694
- "loss": 0.4531,
3695
- "step": 589
3696
- },
3697
- {
3698
- "epoch": 2.82,
3699
- "eval_loss": 4.171577453613281,
3700
- "eval_runtime": 7.3719,
3701
- "eval_samples_per_second": 162.916,
3702
- "eval_steps_per_second": 54.396,
3703
- "step": 589
3704
- },
3705
- {
3706
- "epoch": 2.83,
3707
- "learning_rate": 1.1196911196911197e-05,
3708
- "loss": 0.5339,
3709
- "step": 590
3710
- },
3711
- {
3712
- "epoch": 2.83,
3713
- "learning_rate": 1.0810810810810812e-05,
3714
- "loss": 0.5242,
3715
- "step": 591
3716
- },
3717
- {
3718
- "epoch": 2.83,
3719
- "learning_rate": 1.0424710424710425e-05,
3720
- "loss": 0.5266,
3721
- "step": 592
3722
- },
3723
- {
3724
- "epoch": 2.84,
3725
- "learning_rate": 1.0038610038610038e-05,
3726
- "loss": 0.5188,
3727
- "step": 593
3728
- },
3729
- {
3730
- "epoch": 2.84,
3731
- "learning_rate": 9.652509652509653e-06,
3732
- "loss": 0.459,
3733
- "step": 594
3734
- },
3735
- {
3736
- "epoch": 2.85,
3737
- "learning_rate": 9.266409266409266e-06,
3738
- "loss": 0.3489,
3739
- "step": 595
3740
- },
3741
- {
3742
- "epoch": 2.85,
3743
- "learning_rate": 8.880308880308881e-06,
3744
- "loss": 0.5022,
3745
- "step": 596
3746
- },
3747
- {
3748
- "epoch": 2.86,
3749
- "learning_rate": 8.494208494208494e-06,
3750
- "loss": 0.4513,
3751
- "step": 597
3752
- },
3753
- {
3754
- "epoch": 2.86,
3755
- "learning_rate": 8.108108108108109e-06,
3756
- "loss": 0.4338,
3757
- "step": 598
3758
- },
3759
- {
3760
- "epoch": 2.87,
3761
- "learning_rate": 7.722007722007722e-06,
3762
- "loss": 0.5263,
3763
- "step": 599
3764
- },
3765
- {
3766
- "epoch": 2.87,
3767
- "learning_rate": 7.335907335907337e-06,
3768
- "loss": 0.4898,
3769
- "step": 600
3770
- },
3771
- {
3772
- "epoch": 2.88,
3773
- "learning_rate": 6.94980694980695e-06,
3774
- "loss": 0.5212,
3775
- "step": 601
3776
- },
3777
- {
3778
- "epoch": 2.88,
3779
- "learning_rate": 6.563706563706563e-06,
3780
- "loss": 0.5355,
3781
- "step": 602
3782
- },
3783
- {
3784
- "epoch": 2.89,
3785
- "learning_rate": 6.177606177606178e-06,
3786
- "loss": 0.5444,
3787
- "step": 603
3788
- },
3789
- {
3790
- "epoch": 2.89,
3791
- "learning_rate": 5.791505791505792e-06,
3792
- "loss": 0.5875,
3793
- "step": 604
3794
- },
3795
- {
3796
- "epoch": 2.9,
3797
- "learning_rate": 5.405405405405406e-06,
3798
- "loss": 0.4989,
3799
- "step": 605
3800
- },
3801
- {
3802
- "epoch": 2.9,
3803
- "learning_rate": 5.019305019305019e-06,
3804
- "loss": 0.5159,
3805
- "step": 606
3806
- },
3807
- {
3808
- "epoch": 2.91,
3809
- "learning_rate": 4.633204633204633e-06,
3810
- "loss": 0.3956,
3811
- "step": 607
3812
- },
3813
- {
3814
- "epoch": 2.91,
3815
- "learning_rate": 4.247104247104247e-06,
3816
- "loss": 0.491,
3817
- "step": 608
3818
- },
3819
- {
3820
- "epoch": 2.92,
3821
- "learning_rate": 3.861003861003861e-06,
3822
- "loss": 0.4454,
3823
- "step": 609
3824
- },
3825
- {
3826
- "epoch": 2.92,
3827
- "learning_rate": 3.474903474903475e-06,
3828
- "loss": 0.4844,
3829
- "step": 610
3830
- },
3831
- {
3832
- "epoch": 2.93,
3833
- "learning_rate": 3.088803088803089e-06,
3834
- "loss": 0.4972,
3835
- "step": 611
3836
- },
3837
- {
3838
- "epoch": 2.93,
3839
- "learning_rate": 2.702702702702703e-06,
3840
- "loss": 0.4617,
3841
- "step": 612
3842
- },
3843
- {
3844
- "epoch": 2.94,
3845
- "learning_rate": 2.3166023166023166e-06,
3846
- "loss": 0.4683,
3847
- "step": 613
3848
- },
3849
- {
3850
- "epoch": 2.94,
3851
- "learning_rate": 1.9305019305019305e-06,
3852
- "loss": 0.5815,
3853
- "step": 614
3854
- },
3855
- {
3856
- "epoch": 2.95,
3857
- "learning_rate": 1.5444015444015445e-06,
3858
- "loss": 0.4502,
3859
- "step": 615
3860
- },
3861
- {
3862
- "epoch": 2.95,
3863
- "learning_rate": 1.1583011583011583e-06,
3864
- "loss": 0.477,
3865
- "step": 616
3866
- },
3867
- {
3868
- "epoch": 2.96,
3869
- "learning_rate": 7.722007722007723e-07,
3870
- "loss": 0.4836,
3871
- "step": 617
3872
- },
3873
- {
3874
- "epoch": 2.96,
3875
- "learning_rate": 3.8610038610038613e-07,
3876
- "loss": 0.4523,
3877
- "step": 618
3878
- }
3879
- ],
3880
- "logging_steps": 1,
3881
- "max_steps": 618,
3882
- "num_train_epochs": 3,
3883
- "save_steps": 500,
3884
- "total_flos": 3.072196311043277e+17,
3885
- "trial_name": null,
3886
- "trial_params": null
3887
- }
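
The file deleted above is the trainer_state.json that transformers.Trainer writes alongside each checkpoint: a metadata block (logging_steps, max_steps, total_flos, ...) plus a log_history list holding one training entry per step (logging_steps is 1 here) and an eval entry roughly every 31 steps. A minimal sketch for recovering the loss curves from such a file, assuming the standard layout and a hypothetical local copy at checkpoint-618/trainer_state.json:

import json

# Standard trainer_state.json layout: metadata keys plus a "log_history" list.
with open("checkpoint-618/trainer_state.json") as f:  # hypothetical path
    state = json.load(f)

# Training entries carry "loss"; the periodic eval entries carry "eval_loss" instead.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(len(train), "train points,", len(evals), "eval points")
print("final eval_loss:", evals[-1][1])

For this checkpoint such a script would surface the pattern visible in the diff: training loss drops sharply at the epoch-2 boundary (from roughly 1.9 to 0.5) while eval_loss rises from about 3.25 to 4.17, the usual signature of overfitting to the training set.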
checkpoint-618/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:40caa3ffe88e39fb8f17ca2f4b2952df2344fe3de9435a3b5cb8662a65ff745d
- size 6011
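
For reference, `training_args.bin` is conventionally the pickled `transformers.TrainingArguments` object that the Hugging Face `Trainer` writes with `torch.save` next to each checkpoint. Before this deletion it could have been inspected with a minimal sketch like the following (an assumption about the file's contents, not confirmed by this commit; `transformers` must be installed so the pickle can resolve its classes):

    import torch

    # training_args.bin is assumed to be a torch.save()-pickled
    # TrainingArguments object; transformers must be importable
    # for unpickling to succeed.
    args = torch.load("checkpoint-618/training_args.bin")
    print(args.learning_rate, args.num_train_epochs)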
 
 
 
 
checkpoint-618/zero_to_fp32.py DELETED
@@ -1,587 +0,0 @@
- #!/usr/bin/env python
-
- # Copyright (c) Microsoft Corporation.
- # SPDX-License-Identifier: Apache-2.0
-
- # DeepSpeed Team
-
- # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
- # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
- # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
- # application.
- #
- # example: python zero_to_fp32.py . pytorch_model.bin
-
- import argparse
- import torch
- import glob
- import math
- import os
- import re
- from collections import OrderedDict
- from dataclasses import dataclass
-
- # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
- # DeepSpeed data structures it has to be available in the current python environment.
- from deepspeed.utils import logger
- from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
-                                             FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
-                                             FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
-
-
- @dataclass
- class zero_model_state:
-     buffers: dict()
-     param_shapes: dict()
-     shared_params: list
-     ds_version: int
-     frozen_param_shapes: dict()
-     frozen_param_fragments: dict()
-
-
- debug = 0
-
- # load to cpu
- device = torch.device('cpu')
-
-
- def atoi(text):
-     return int(text) if text.isdigit() else text
-
-
- def natural_keys(text):
-     '''
-     alist.sort(key=natural_keys) sorts in human order
-     http://nedbatchelder.com/blog/200712/human_sorting.html
-     (See Toothy's implementation in the comments)
-     '''
-     return [atoi(c) for c in re.split(r'(\d+)', text)]
-
-
- def get_model_state_file(checkpoint_dir, zero_stage):
-     if not os.path.isdir(checkpoint_dir):
-         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
-
-     # there should be only one file
-     if zero_stage <= 2:
-         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
-     elif zero_stage == 3:
-         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
-
-     if not os.path.exists(file):
-         raise FileNotFoundError(f"can't find model states file at '{file}'")
-
-     return file
-
-
- def get_checkpoint_files(checkpoint_dir, glob_pattern):
-     # XXX: need to test that this simple glob rule works for multi-node setup too
-     ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
-
-     if len(ckpt_files) == 0:
-         raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
-
-     return ckpt_files
-
-
- def get_optim_files(checkpoint_dir):
-     return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
-
-
- def get_model_state_files(checkpoint_dir):
-     return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
-
-
- def parse_model_states(files):
-     zero_model_states = []
-     for file in files:
-         state_dict = torch.load(file, map_location=device)
-
-         if BUFFER_NAMES not in state_dict:
-             raise ValueError(f"{file} is not a model state checkpoint")
-         buffer_names = state_dict[BUFFER_NAMES]
-         if debug:
-             print("Found buffers:", buffer_names)
-
-         # recover just the buffers while restoring them to fp32 if they were saved in fp16
-         buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
-         param_shapes = state_dict[PARAM_SHAPES]
-
-         # collect parameters that are included in param_shapes
-         param_names = []
-         for s in param_shapes:
-             for name in s.keys():
-                 param_names.append(name)
-
-         # update with frozen parameters
-         frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
-         if frozen_param_shapes is not None:
-             if debug:
-                 print(f"Found frozen_param_shapes: {frozen_param_shapes}")
-             param_names += list(frozen_param_shapes.keys())
-
-         # handle shared params
-         shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
-
-         ds_version = state_dict.get(DS_VERSION, None)
-
-         frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
-
-         z_model_state = zero_model_state(buffers=buffers,
-                                          param_shapes=param_shapes,
-                                          shared_params=shared_params,
-                                          ds_version=ds_version,
-                                          frozen_param_shapes=frozen_param_shapes,
-                                          frozen_param_fragments=frozen_param_fragments)
-         zero_model_states.append(z_model_state)
-
-     return zero_model_states
-
-
- def parse_optim_states(files, ds_checkpoint_dir):
-
-     total_files = len(files)
-     state_dicts = []
-     for f in files:
-         state_dict = torch.load(f, map_location=device)
-         # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
-         # and also handle the case where it was already removed by another helper script
-         state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
-         state_dicts.append(state_dict)
-
-     if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
-         raise ValueError(f"{files[0]} is not a zero checkpoint")
-     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
-     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
-
-     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
-     # parameters can be different from data parallelism for non-expert parameters. So we can just
-     # use the max of the partition_count to get the dp world_size.
-
-     if type(world_size) is list:
-         world_size = max(world_size)
-
-     if world_size != total_files:
-         raise ValueError(
-             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
-             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
-         )
-
-     # the groups are named differently in each stage
-     if zero_stage <= 2:
-         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
-     elif zero_stage == 3:
-         fp32_groups_key = FP32_FLAT_GROUPS
-     else:
-         raise ValueError(f"unknown zero stage {zero_stage}")
-
-     if zero_stage <= 2:
-         fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
-     elif zero_stage == 3:
-         # if there is more than one param group, there will be multiple flattened tensors - one
-         # flattened tensor per group - for simplicity merge them into a single tensor
-         #
-         # XXX: could make the script more memory efficient for when there are multiple groups - it
-         # will require matching the sub-lists of param_shapes for each param group flattened tensor
-
-         fp32_flat_groups = [
-             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
-         ]
-
-     return zero_stage, world_size, fp32_flat_groups
-
-
- def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
-     """
-     Returns fp32 state_dict reconstructed from ds checkpoint
-
-     Args:
-         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
-
-     """
-     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
-
-     optim_files = get_optim_files(ds_checkpoint_dir)
-     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
-     print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
-
-     model_files = get_model_state_files(ds_checkpoint_dir)
-
-     zero_model_states = parse_model_states(model_files)
-     print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
-
-     if zero_stage <= 2:
-         return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
-     elif zero_stage == 3:
-         return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
-
-
- def _zero2_merge_frozen_params(state_dict, zero_model_states):
-     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
-         return
-
-     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
-     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
-
-     if debug:
-         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
-         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
-
-     wanted_params = len(frozen_param_shapes)
-     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
-     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
-     print(f'Frozen params: Have {avail_numel} numels to process.')
-     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
-
-     total_params = 0
-     total_numel = 0
-     for name, shape in frozen_param_shapes.items():
-         total_params += 1
-         unpartitioned_numel = shape.numel()
-         total_numel += unpartitioned_numel
-
-         state_dict[name] = frozen_param_fragments[name]
-
-         if debug:
-             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
-
-     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-     param_shapes = zero_model_states[0].param_shapes
-
-     # Reconstruction protocol:
-     #
-     # XXX: document this
-
-     if debug:
-         for i in range(world_size):
-             for j in range(len(fp32_flat_groups[0])):
-                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
-
-     # XXX: memory usage doubles here (zero2)
-     num_param_groups = len(fp32_flat_groups[0])
-     merged_single_partition_of_fp32_groups = []
-     for i in range(num_param_groups):
-         merged_partitions = [sd[i] for sd in fp32_flat_groups]
-         full_single_fp32_vector = torch.cat(merged_partitions, 0)
-         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
-     avail_numel = sum(
-         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
-
-     if debug:
-         wanted_params = sum([len(shapes) for shapes in param_shapes])
-         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
-         # not asserting if there is a mismatch due to possible padding
-         print(f"Have {avail_numel} numels to process.")
-         print(f"Need {wanted_numel} numels in {wanted_params} params.")
-
-     # params
-     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-     # out-of-core computing solution
-     total_numel = 0
-     total_params = 0
-     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
-         offset = 0
-         avail_numel = full_single_fp32_vector.numel()
-         for name, shape in shapes.items():
-
-             unpartitioned_numel = shape.numel()
-             total_numel += unpartitioned_numel
-             total_params += 1
-
-             if debug:
-                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
-             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
-             offset += unpartitioned_numel
-
-         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
-         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
-         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
-         # live optimizer object, so we are checking that the numbers are within the right range
-         align_to = 2 * world_size
-
-         def zero2_align(x):
-             return align_to * math.ceil(x / align_to)
-
-         if debug:
-             print(f"original offset={offset}, avail_numel={avail_numel}")
-
-         offset = zero2_align(offset)
-         avail_numel = zero2_align(avail_numel)
-
-         if debug:
-             print(f"aligned offset={offset}, avail_numel={avail_numel}")
-
-         # Sanity check
-         if offset != avail_numel:
-             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
-     state_dict = OrderedDict()
-
-     # buffers
-     buffers = zero_model_states[0].buffers
-     state_dict.update(buffers)
-     if debug:
-         print(f"added {len(buffers)} buffers")
-
-     _zero2_merge_frozen_params(state_dict, zero_model_states)
-
-     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-     # recover shared parameters
-     for pair in zero_model_states[0].shared_params:
-         if pair[1] in state_dict:
-             state_dict[pair[0]] = state_dict[pair[1]]
-
-     return state_dict
-
-
- def zero3_partitioned_param_info(unpartitioned_numel, world_size):
-     remainder = unpartitioned_numel % world_size
-     padding_numel = (world_size - remainder) if remainder else 0
-     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
-     return partitioned_numel, padding_numel
-
-
- def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
-     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
-         return
-
-     if debug:
-         for i in range(world_size):
-             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
-             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
-
-     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
-     wanted_params = len(frozen_param_shapes)
-     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
-     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
-     print(f'Frozen params: Have {avail_numel} numels to process.')
-     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
-
-     total_params = 0
-     total_numel = 0
-     for name, shape in zero_model_states[0].frozen_param_shapes.items():
-         total_params += 1
-         unpartitioned_numel = shape.numel()
-         total_numel += unpartitioned_numel
-
-         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
-         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
-
-         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-         if debug:
-             print(
-                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-             )
-
-     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-     param_shapes = zero_model_states[0].param_shapes
-     avail_numel = fp32_flat_groups[0].numel() * world_size
-     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
-     # param, re-consolidating each param, while dealing with padding if any
-
-     # merge list of dicts, preserving order
-     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
-
-     if debug:
-         for i in range(world_size):
-             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
-
-     wanted_params = len(param_shapes)
-     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
-     # not asserting if there is a mismatch due to possible padding
-     avail_numel = fp32_flat_groups[0].numel() * world_size
-     print(f"Trainable params: Have {avail_numel} numels to process.")
-     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
-
-     # params
-     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-     # out-of-core computing solution
-     offset = 0
-     total_numel = 0
-     total_params = 0
-     for name, shape in param_shapes.items():
-
-         unpartitioned_numel = shape.numel()
-         total_numel += unpartitioned_numel
-         total_params += 1
-
-         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-         if debug:
-             print(
-                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-             )
-
-         # XXX: memory usage doubles here
-         state_dict[name] = torch.cat(
-             tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
-             0).narrow(0, 0, unpartitioned_numel).view(shape)
-         offset += partitioned_numel
-
-     offset *= world_size
-
-     # Sanity check
-     if offset != avail_numel:
-         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
-     state_dict = OrderedDict()
-
-     # buffers
-     buffers = zero_model_states[0].buffers
-     state_dict.update(buffers)
-     if debug:
-         print(f"added {len(buffers)} buffers")
-
-     _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
-
-     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-     # recover shared parameters
-     for pair in zero_model_states[0].shared_params:
-         if pair[1] in state_dict:
-             state_dict[pair[0]] = state_dict[pair[1]]
-
-     return state_dict
-
-
- def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
-     """
-     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
-     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
-     via a model hub.
-
-     Args:
-         - ``checkpoint_dir``: path to the desired checkpoint folder
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
-
-     Returns:
-         - pytorch ``state_dict``
-
-     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
-     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
-     the checkpoint.
-
-     A typical usage might be ::
-
-         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
-         # do the training and checkpoint saving
-         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
-         model = model.cpu() # move to cpu
-         model.load_state_dict(state_dict)
-         # submit to model hub or save the model to share with others
-
-     In this example the ``model`` will no longer be usable in the deepspeed context of the same
-     application. i.e. you will need to re-initialize the deepspeed engine, since
-     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
-
-     """
-     if tag is None:
-         latest_path = os.path.join(checkpoint_dir, 'latest')
-         if os.path.isfile(latest_path):
-             with open(latest_path, 'r') as fd:
-                 tag = fd.read().strip()
-         else:
-             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
-
-     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
-
-     if not os.path.isdir(ds_checkpoint_dir):
-         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
-
-     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
-
-
- def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
-     """
-     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
-     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
-
-     Args:
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-     """
-
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-     print(f"Saving fp32 state dict to {output_file}")
-     torch.save(state_dict, output_file)
-
-
- def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
-     """
-     1. Put the provided model to cpu
-     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
-     3. Load it into the provided model
-
-     Args:
-         - ``model``: the model object to update
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-
-     Returns:
-         - ``model``: modified model
-
-     Make sure you have plenty of CPU memory available before you call this function. If you don't
-     have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
-     conveniently placed for you in the checkpoint folder.
-
-     A typical usage might be ::
-
-         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
-         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
-         # submit to model hub or save the model to share with others
-
-     Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
-     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
-     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-     """
-     logger.info(f"Extracting fp32 weights")
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-
-     logger.info(f"Overwriting model with fp32 weights")
-     model = model.cpu()
-     model.load_state_dict(state_dict, strict=False)
-
-     return model
-
-
- if __name__ == "__main__":
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument("checkpoint_dir",
-                         type=str,
-                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
-     parser.add_argument(
-         "output_file",
-         type=str,
-         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
-     parser.add_argument("-t",
-                         "--tag",
-                         type=str,
-                         default=None,
-                         help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
-     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
-     args = parser.parse_args()
-
-     debug = args.debug
-
-     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
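
For reference, before this deletion the script above was the supported way to consolidate the partitioned optimizer states under `checkpoint-618` into a single fp32 checkpoint, either via the CLI form documented in its header (`python zero_to_fp32.py . pytorch_model.bin`, run inside the checkpoint folder) or in-process. A minimal in-process sketch, assuming the usual DeepSpeed layout that the script itself expects: a `latest` file naming the tag folder that holds the `*_optim_states.pt` and `*_model_states.pt` shards.

    # Minimal sketch, assuming the pre-deletion DeepSpeed layout of
    # checkpoint-618: a 'latest' file naming the tag folder that holds
    # the *_optim_states.pt and *_model_states.pt shards.
    from deepspeed.utils.zero_to_fp32 import (
        convert_zero_checkpoint_to_fp32_state_dict,
        get_fp32_state_dict_from_zero_checkpoint,
    )

    # File-to-file conversion: resolves the tag from 'latest', merges the
    # rank partitions, and writes one consolidated fp32 state_dict file.
    convert_zero_checkpoint_to_fp32_state_dict("checkpoint-618", "pytorch_model_fp32.bin")

    # Or keep the merged fp32 tensors in memory (already on CPU).
    state_dict = get_fp32_state_dict_from_zero_checkpoint("checkpoint-618")

As the docstrings above note, both paths need enough free CPU memory to hold the full fp32 model.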