ardaspear committed on
Commit
126e6ed
1 Parent(s): 1a90c15

Training in progress, step 45, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:405937c3b68f2cbf01f101b5da53d9a860aff5e380eb20f68c9dfc0bde471ea1
+oid sha256:436fac77b5a8e8ce7b500ef09266584084305b6839231848a0e796464aa4fa3c
 size 191968
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3486f49d8575e622ada0c28486e2480262b205d940674cca7b1825986baa3bd5
+oid sha256:750c23204f141f2fc7a8292881d41d97b35958ea0220941f73545735119833ad
 size 253144
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9bc9d59fade5c32432d168a1733323120abe43da9328cb3b9ed170adf092d2bd
+oid sha256:b8f44828df8830151d896be841876dc9cee379b27bb94793e2085a5033e38e02
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bd18e5325bf99a4b662a5314e5f491b65903ffc83b0317f63835d8305a50591e
+oid sha256:4ec0bb6efeeb25000ef7426abb9d88265daa212fa2a170e1fb3f58996efa27f6
 size 1064
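
The four binary files above are tracked with Git LFS, so the repository stores only three-line pointer files; each diff changes nothing but the `oid sha256:` line, while `size` stays the same, presumably because the tensor shapes are unchanged between steps 40 and 45. As a minimal, illustrative sketch (not part of this commit), a downloaded blob can be checked against the new pointer roughly like this, assuming the checkpoint files have been fetched locally under the paths shown above:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex-encoded SHA-256 digest."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected value taken from the new LFS pointer for the adapter weights in this commit.
expected_oid = "436fac77b5a8e8ce7b500ef09266584084305b6839231848a0e796464aa4fa3c"
blob = Path("last-checkpoint/adapter_model.safetensors")

actual = sha256_of(blob)
print("match" if actual == expected_oid else f"mismatch: {actual}")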
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.13745704467353953,
+  "epoch": 0.15463917525773196,
   "eval_steps": 5,
-  "global_step": 40,
+  "global_step": 45,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -170,6 +170,28 @@
       "eval_samples_per_second": 271.037,
       "eval_steps_per_second": 68.036,
       "step": 40
+    },
+    {
+      "epoch": 0.14432989690721648,
+      "grad_norm": 0.6259255409240723,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 10.2686,
+      "step": 42
+    },
+    {
+      "epoch": 0.15463917525773196,
+      "grad_norm": 0.6533415913581848,
+      "learning_rate": 3.8060233744356633e-06,
+      "loss": 10.2656,
+      "step": 45
+    },
+    {
+      "epoch": 0.15463917525773196,
+      "eval_loss": 10.265573501586914,
+      "eval_runtime": 1.806,
+      "eval_samples_per_second": 271.324,
+      "eval_steps_per_second": 68.108,
+      "step": 45
     }
   ],
   "logging_steps": 3,
@@ -189,7 +211,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7780658577408.0,
+  "total_flos": 8714895360000.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null