dsakerkwq committed on
Commit 5f68c30 · verified
1 Parent(s): 91a70e8

Training in progress, step 34, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20476cd7c594a37b87e2e5f066938d80b47aa5fe31ec4c688576410f064e4600
+oid sha256:3a7edddc1af76be37f02e3e538e92012e04bdbe252253ac446610a57566dc8f5
 size 156926880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c7866ca77b2dd2c3cecf0a7c8915e217fe8c318e13cec824512e1b991230be6
+oid sha256:afd631a925f42608d56247ae67b3f0ef6ca8b77b463668920f747762dca00752
 size 313998650
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ba1c31d151d1b9bd34b79b863f17b9cd94fd1935e125ecf02e890fa08d06d9a
+oid sha256:c415ab21661709deb54d3f5b1af59ad0ef3f71a6592bce15af188ce87af38de2
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e70538ae8d0e462178176fbf6fa0f463390010edd21a43cb478729b26f490af
+oid sha256:5fc585dc9fd222c4cbd66e7bc9788f3fb115a2fd8960ceb98cd48dadae2e4e6d
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:64376fe408be1bca07c5cc5f7754bdbbb6f62ddfe83e14d2a20bfb156884983a
+oid sha256:980a12c04d8c8602e71f6d5b227e8a075dd6823b3cce79a1951691aa3ee8ee04
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9efe98928de10d1ff2431faf4c63ac7b49860544415532c5c6120ad606c07f3f
+oid sha256:28bf423b3d035c0f41731474a4874edb06ff4eb8889d8f11f5ef3a8e39c35181
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c220d2fbe9a2870f9ac93749b3d4b3852ce94b5983d948fa8de3d35e453f98ce
+oid sha256:7312f49c1e1b01f68cdaee175129535fda49cd22ed1f251a2745d2c8c42996be
 size 1064
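
Each of the checkpoint files above is tracked with Git LFS, so the diffs only swap the sha256 object id in the pointer while the byte size stays the same. A minimal sketch for checking a downloaded file against the pointer recorded in this commit; the local path is an assumption and should point at wherever the checkpoint was fetched:

```python
import hashlib

# Hypothetical local path; assumes the checkpoint directory was downloaded here.
path = "last-checkpoint/adapter_model.safetensors"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

# oid recorded in the LFS pointer for adapter_model.safetensors after this commit.
expected = "3a7edddc1af76be37f02e3e538e92012e04bdbe252253ac446610a57566dc8f5"
print(h.hexdigest() == expected)
```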
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": 0.3648199141025543,
3
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
4
- "epoch": 2.2598870056497176,
5
  "eval_steps": 25,
6
- "global_step": 25,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -198,6 +198,69 @@
198
  "eval_samples_per_second": 57.021,
199
  "eval_steps_per_second": 14.826,
200
  "step": 25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  }
202
  ],
203
  "logging_steps": 1,
@@ -221,12 +284,12 @@
221
  "should_evaluate": false,
222
  "should_log": false,
223
  "should_save": true,
224
- "should_training_stop": false
225
  },
226
  "attributes": {}
227
  }
228
  },
229
- "total_flos": 7.94731890081792e+16,
230
  "train_batch_size": 2,
231
  "trial_name": null,
232
  "trial_params": null
 
1
  {
2
  "best_metric": 0.3648199141025543,
3
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
4
+ "epoch": 3.073446327683616,
5
  "eval_steps": 25,
6
+ "global_step": 34,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
198
  "eval_samples_per_second": 57.021,
199
  "eval_steps_per_second": 14.826,
200
  "step": 25
201
+ },
202
+ {
203
+ "epoch": 2.3502824858757063,
204
+ "grad_norm": 1.5733007192611694,
205
+ "learning_rate": 2.3180194846605367e-05,
206
+ "loss": 0.3583,
207
+ "step": 26
208
+ },
209
+ {
210
+ "epoch": 2.440677966101695,
211
+ "grad_norm": 0.6812260150909424,
212
+ "learning_rate": 2.0214529598676836e-05,
213
+ "loss": 0.362,
214
+ "step": 27
215
+ },
216
+ {
217
+ "epoch": 2.5310734463276834,
218
+ "grad_norm": 1.1533381938934326,
219
+ "learning_rate": 1.758386744638546e-05,
220
+ "loss": 0.3392,
221
+ "step": 28
222
+ },
223
+ {
224
+ "epoch": 2.621468926553672,
225
+ "grad_norm": 0.9058969020843506,
226
+ "learning_rate": 1.531354310432403e-05,
227
+ "loss": 0.3486,
228
+ "step": 29
229
+ },
230
+ {
231
+ "epoch": 2.711864406779661,
232
+ "grad_norm": 1.0298035144805908,
233
+ "learning_rate": 1.3425421036992098e-05,
234
+ "loss": 0.3511,
235
+ "step": 30
236
+ },
237
+ {
238
+ "epoch": 2.8022598870056497,
239
+ "grad_norm": 0.258158802986145,
240
+ "learning_rate": 1.1937684892050604e-05,
241
+ "loss": 0.339,
242
+ "step": 31
243
+ },
244
+ {
245
+ "epoch": 2.8926553672316384,
246
+ "grad_norm": 0.6644238829612732,
247
+ "learning_rate": 1.0864662381854632e-05,
248
+ "loss": 0.3545,
249
+ "step": 32
250
+ },
251
+ {
252
+ "epoch": 2.983050847457627,
253
+ "grad_norm": 1.3119802474975586,
254
+ "learning_rate": 1.0216687299751144e-05,
255
+ "loss": 0.3612,
256
+ "step": 33
257
+ },
258
+ {
259
+ "epoch": 3.073446327683616,
260
+ "grad_norm": 1.9420535564422607,
261
+ "learning_rate": 1e-05,
262
+ "loss": 0.6584,
263
+ "step": 34
264
  }
265
  ],
266
  "logging_steps": 1,
 
284
  "should_evaluate": false,
285
  "should_log": false,
286
  "should_save": true,
287
+ "should_training_stop": true
288
  },
289
  "attributes": {}
290
  }
291
  },
292
+ "total_flos": 1.0808353705112371e+17,
293
  "train_batch_size": 2,
294
  "trial_name": null,
295
  "trial_params": null