vdos committed on
Commit 2c7dba1
1 Parent(s): ed7a8cd

Training in progress, step 50, checkpoint

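This checkpoint was written at global step 50, the final step of the run (the trainer_state.json diff below flips "should_training_stop" to true). For orientation, the following is a hypothetical transformers.TrainingArguments sketch consistent with the values visible in this commit (batch size 2, eval/save every 25 steps, logging every step, a 5e-05 learning rate decaying to 0.0 by step 50); the run's actual configuration is not part of this commit.

```python
# Hypothetical sketch only -- values are read off the trainer_state.json diff below;
# the real run's training arguments are not included in this commit.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="miner_id_24",       # matches "best_model_checkpoint": "miner_id_24/checkpoint-50"
    max_steps=50,                   # "should_training_stop" becomes true at step 50
    per_device_train_batch_size=2,  # "train_batch_size": 2
    learning_rate=5e-05,            # peak LR in the log; it decays to 0.0 by step 50
    logging_steps=1,                # "logging_steps": 1
    eval_strategy="steps",          # called "evaluation_strategy" on older transformers releases
    eval_steps=25,                  # "eval_steps": 25
    save_strategy="steps",
    save_steps=25,                  # checkpoint-25 and checkpoint-50 both exist
    load_best_model_at_end=True,    # consistent with best_metric / best_model_checkpoint tracking
)
# The exact LR scheduler type cannot be recovered from the logged values alone.
```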
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a65c595581f0d3a1494caa6f10b9e8d46b41ba2c9d917153ac0c612eacc84a0a
+ oid sha256:4121d8d0eacc28e6c94c43289be8dbdc657bef849e5478d09510a1b1c6ce57bf
  size 147770496
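Only the LFS pointer changes here: the oid (the SHA-256 of the adapter blob) is updated for the new weights, while the size stays at 147770496 bytes. A minimal sketch for reading such a pointer file, assuming the repository is checked out without smudging the LFS blobs (otherwise the file on disk is the binary itself); parse_lfs_pointer is a hypothetical helper, not a library function:

```python
# Minimal sketch: parse a git-lfs pointer file like the one above.
# The three fields (version, oid, size) come straight from the LFS pointer spec v1.
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    """Return the version URL, sha256 oid, and byte size recorded in an LFS pointer."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, value = line.split(" ", 1)
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }


if __name__ == "__main__":
    info = parse_lfs_pointer("last-checkpoint/adapter_model.safetensors")
    print(info["oid"], info["size"])  # e.g. 4121d8d0... 147770496
```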
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:98ca22d830f36f278c1f395eed34f115e1d9636308ea4bfbd2b559d313952aaa
+ oid sha256:abc1274ec6d6f4ac4ae3604274b1ae01e8bea0244e6cf7baea8f0c4fba358bb9
  size 295765866
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ca61128a5a0a3f14528f211bf07a9b88b439d88a1d6bdb67f4fdeef73784f0b3
+ oid sha256:0fe25d20f7e50f294e573d227ab50c7fa8ecf8efa8628e4f35843f607c9f75bb
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1ca869ba7e9d13e59e5ff27a335fe77755d6ef0e20846155b9ad47060142cae4
+ oid sha256:a47e9904f04ba420811ef8c9a750aa174cefbe2700dad81b84216ec46031907b
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8e41be5ba667c192351ba37feac2a33dd02821b5d8d779a2b1dd45f1bd9225ef
+ oid sha256:e7b770d93a313a221075f74ce39546e98544c0a2afce1f81bdc66623c5b39254
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a931e05bdc836c3fbe2ddb7d4cb473a15f4479ea373b93c66ab939a519c95bbd
+ oid sha256:e25316dac459baca287ca0329aad4cc789447f30aaee094ffd4e71690c4c5c72
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
+ oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
  size 1064
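optimizer.pt, scheduler.pt, and the four rng_state_*.pth files (one per process rank) are what transformers.Trainer reads back when a run is resumed via trainer.train(resume_from_checkpoint=...). A quick sketch for inspecting them locally, assuming the LFS blobs have been pulled (e.g. with git lfs pull):

```python
# Sketch: peek at the resumption state saved in this checkpoint.
# Assumes the files have been fetched locally; paths are relative to the repo root.
import torch

# weights_only=False is needed on newer PyTorch, where torch.load defaults to weights_only=True.
opt_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
sched_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)

print(opt_state.keys())  # typically "state" and "param_groups"
print(sched_state)       # LR scheduler state dict
print(type(rng_state))   # per-rank RNG snapshot, used to make resumption reproducible
```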
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.7648794651031494,
- "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 1.1799410029498525,
+ "best_metric": 1.746710181236267,
+ "best_model_checkpoint": "miner_id_24/checkpoint-50",
+ "epoch": 2.359882005899705,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,189 @@
  "eval_samples_per_second": 39.159,
  "eval_steps_per_second": 4.929,
  "step": 25
+ },
+ {
+ "epoch": 1.2271386430678466,
+ "grad_norm": 1.1068404912948608,
+ "learning_rate": 5e-05,
+ "loss": 1.7302,
+ "step": 26
+ },
+ {
+ "epoch": 1.2743362831858407,
+ "grad_norm": 1.2672423124313354,
+ "learning_rate": 4.6729843538492847e-05,
+ "loss": 1.7341,
+ "step": 27
+ },
+ {
+ "epoch": 1.3215339233038348,
+ "grad_norm": 1.0532811880111694,
+ "learning_rate": 4.347369038899744e-05,
+ "loss": 1.6848,
+ "step": 28
+ },
+ {
+ "epoch": 1.368731563421829,
+ "grad_norm": 1.037347674369812,
+ "learning_rate": 4.0245483899193595e-05,
+ "loss": 1.6998,
+ "step": 29
+ },
+ {
+ "epoch": 1.415929203539823,
+ "grad_norm": 1.1231845617294312,
+ "learning_rate": 3.705904774487396e-05,
+ "loss": 1.6996,
+ "step": 30
+ },
+ {
+ "epoch": 1.463126843657817,
+ "grad_norm": 1.1563193798065186,
+ "learning_rate": 3.392802673484193e-05,
+ "loss": 1.655,
+ "step": 31
+ },
+ {
+ "epoch": 1.5103244837758112,
+ "grad_norm": 1.0125848054885864,
+ "learning_rate": 3.086582838174551e-05,
+ "loss": 1.5737,
+ "step": 32
+ },
+ {
+ "epoch": 1.5575221238938053,
+ "grad_norm": 1.0071817636489868,
+ "learning_rate": 2.7885565489049946e-05,
+ "loss": 1.8132,
+ "step": 33
+ },
+ {
+ "epoch": 1.6047197640117994,
+ "grad_norm": 0.9246135354042053,
+ "learning_rate": 2.500000000000001e-05,
+ "loss": 1.6492,
+ "step": 34
+ },
+ {
+ "epoch": 1.6519174041297935,
+ "grad_norm": 1.0794720649719238,
+ "learning_rate": 2.2221488349019903e-05,
+ "loss": 1.6322,
+ "step": 35
+ },
+ {
+ "epoch": 1.6991150442477876,
+ "grad_norm": 1.4046562910079956,
+ "learning_rate": 1.9561928549563968e-05,
+ "loss": 1.6497,
+ "step": 36
+ },
+ {
+ "epoch": 1.7463126843657817,
+ "grad_norm": 1.0453946590423584,
+ "learning_rate": 1.703270924499656e-05,
+ "loss": 1.5673,
+ "step": 37
+ },
+ {
+ "epoch": 1.7935103244837758,
+ "grad_norm": 1.0008294582366943,
+ "learning_rate": 1.4644660940672627e-05,
+ "loss": 1.8059,
+ "step": 38
+ },
+ {
+ "epoch": 1.8407079646017699,
+ "grad_norm": 0.9396019577980042,
+ "learning_rate": 1.2408009626051137e-05,
+ "loss": 1.5804,
+ "step": 39
+ },
+ {
+ "epoch": 1.887905604719764,
+ "grad_norm": 1.0129765272140503,
+ "learning_rate": 1.0332332985438248e-05,
+ "loss": 1.6912,
+ "step": 40
+ },
+ {
+ "epoch": 1.935103244837758,
+ "grad_norm": 1.0260602235794067,
+ "learning_rate": 8.426519384872733e-06,
+ "loss": 1.6417,
+ "step": 41
+ },
+ {
+ "epoch": 1.9823008849557522,
+ "grad_norm": 1.5143276453018188,
+ "learning_rate": 6.698729810778065e-06,
+ "loss": 1.7454,
+ "step": 42
+ },
+ {
+ "epoch": 2.0294985250737465,
+ "grad_norm": 1.6464427709579468,
+ "learning_rate": 5.156362923365588e-06,
+ "loss": 2.9107,
+ "step": 43
+ },
+ {
+ "epoch": 2.0766961651917404,
+ "grad_norm": 0.8135335445404053,
+ "learning_rate": 3.8060233744356633e-06,
+ "loss": 1.6117,
+ "step": 44
+ },
+ {
+ "epoch": 2.1238938053097347,
+ "grad_norm": 0.8873955011367798,
+ "learning_rate": 2.653493525244721e-06,
+ "loss": 1.6066,
+ "step": 45
+ },
+ {
+ "epoch": 2.1710914454277286,
+ "grad_norm": 1.112796664237976,
+ "learning_rate": 1.70370868554659e-06,
+ "loss": 1.7035,
+ "step": 46
+ },
+ {
+ "epoch": 2.218289085545723,
+ "grad_norm": 1.3752461671829224,
+ "learning_rate": 9.607359798384785e-07,
+ "loss": 1.7328,
+ "step": 47
+ },
+ {
+ "epoch": 2.265486725663717,
+ "grad_norm": 0.9514968395233154,
+ "learning_rate": 4.277569313094809e-07,
+ "loss": 1.5891,
+ "step": 48
+ },
+ {
+ "epoch": 2.312684365781711,
+ "grad_norm": 0.9039832353591919,
+ "learning_rate": 1.0705383806982606e-07,
+ "loss": 1.6777,
+ "step": 49
+ },
+ {
+ "epoch": 2.359882005899705,
+ "grad_norm": 0.9195019006729126,
+ "learning_rate": 0.0,
+ "loss": 1.6891,
+ "step": 50
+ },
+ {
+ "epoch": 2.359882005899705,
+ "eval_loss": 1.746710181236267,
+ "eval_runtime": 3.624,
+ "eval_samples_per_second": 39.459,
+ "eval_steps_per_second": 4.967,
+ "step": 50
  }
  ],
  "logging_steps": 1,
@@ -221,12 +404,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 5.29768176943104e+16,
+ "total_flos": 1.059536353886208e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null