dimasik87 committed on
Commit
d92e5f0
1 Parent(s): 1368922

Training in progress, step 25, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05b3a5dd23a8ba5febeaefbfdd7893eb2946c384ba9fc0ad0b8ca5b13934f9a5
+oid sha256:5ac773f5d65faa92a2f6980f7b33dd39dca4a8f514716e18e3e86af0ab16c6c1
 size 148047722
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e489a61cf79a2a014b1066e7ba5a23873d72c453c37a5d97dc70ee7e933edb99
+oid sha256:5adcf5cc8e912a0d13dfa423afe108e871c6e903d226e02a8ca3b4e58c883f0a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d95f3fb1f9df4cdd5f470d1684a242bf1d1940d47b9622802f603a91ffa5bc0
+oid sha256:4521b8db9cc205e54aa606d85e707c024abd2d8ad4a20bec4b2cff365dc59cdf
 size 1064
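Each of the three files above is a Git LFS pointer rather than the binary itself: only the sha256 oid of the tracked blob changes between checkpoints, while the recorded size stays the same. A minimal sketch for reading such a pointer, assuming the file still holds the pointer text locally; the parse_lfs_pointer helper and the hard-coded path are illustrative, not part of this repository:

# Parse a Git LFS pointer file ("version", "oid", "size") into a dict.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("last-checkpoint/optimizer.pt")
print(pointer["oid"])   # e.g. sha256:5ac773f5...
print(pointer["size"])  # 148047722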
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.18285714285714286,
+  "epoch": 0.22857142857142856,
   "eval_steps": 3,
-  "global_step": 20,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -203,6 +203,57 @@
       "learning_rate": 5.000000000000002e-05,
       "loss": 0.0,
       "step": 20
+    },
+    {
+      "epoch": 0.192,
+      "grad_norm": NaN,
+      "learning_rate": 3.308693936411421e-05,
+      "loss": 0.0,
+      "step": 21
+    },
+    {
+      "epoch": 0.192,
+      "eval_loss": NaN,
+      "eval_runtime": 4.0268,
+      "eval_samples_per_second": 23.095,
+      "eval_steps_per_second": 11.672,
+      "step": 21
+    },
+    {
+      "epoch": 0.20114285714285715,
+      "grad_norm": NaN,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.0,
+      "step": 22
+    },
+    {
+      "epoch": 0.2102857142857143,
+      "grad_norm": NaN,
+      "learning_rate": 8.645454235739903e-06,
+      "loss": 0.0,
+      "step": 23
+    },
+    {
+      "epoch": 0.21942857142857142,
+      "grad_norm": NaN,
+      "learning_rate": 2.1852399266194314e-06,
+      "loss": 0.0,
+      "step": 24
+    },
+    {
+      "epoch": 0.21942857142857142,
+      "eval_loss": NaN,
+      "eval_runtime": 4.0035,
+      "eval_samples_per_second": 23.23,
+      "eval_steps_per_second": 11.74,
+      "step": 24
+    },
+    {
+      "epoch": 0.22857142857142856,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "step": 25
     }
   ],
   "logging_steps": 1,
@@ -217,12 +268,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 5225075347292160.0,
+  "total_flos": 6531344184115200.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null