Nexspear committed (verified)
Commit 2681601 · 1 Parent(s): a198676

Training in progress, step 30, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46f1c4b3f3e865acb10f07ea3213d8b2d2b7afbf288f443165a2b62a5fb00d46
+oid sha256:d6507eba1614933a24c2ad24894a842b52b623e2d470742817d195929814e661
 size 150486964

last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67124e1bb49f0d1ecfd196b6845c01ded38adeb7a043cc29e559851d658d090a
+oid sha256:5140c4e7690dfd5c2a867cfa97142c17be010090a2de97d92131e03d7c9fa73f
 size 14244

last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6676fe28230ae15b45fb334c871c6fdf1a7984a935952b9f8650896c37a8c106
+oid sha256:d37b0dad6b9c48822da5c83a071d50252502799ad22c4c4907147ad4e4f8e2f4
 size 1064

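The three files above are Git LFS pointer files, not the binary artifacts themselves: each pointer records only the LFS spec version, the sha256 oid of the stored object, and its byte size. As a minimal illustrative sketch (the helper name read_lfs_pointer and the example path are assumptions for this note, not part of the repository), such a pointer can be parsed with the standard library alone:

# Minimal sketch: parse a Git LFS pointer file into its three fields.
# The path refers to the small pointer as stored in git, not to the
# resolved ~150 MB optimizer state it points at.
def read_lfs_pointer(path):
    """Return a dict with 'version', 'oid', and 'size' from an LFS pointer file."""
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    # e.g. fields["oid"]  == "sha256:d6507eba1614933a24c2ad24894a842b52b623e2d470742817d195929814e661"
    #      fields["size"] == "150486964"
    return fields

if __name__ == "__main__":
    print(read_lfs_pointer("last-checkpoint/optimizer.pt"))
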
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.13966480446927373,
+  "epoch": 0.16759776536312848,
   "eval_steps": 5,
-  "global_step": 25,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -111,6 +111,28 @@
       "eval_samples_per_second": 36.747,
       "eval_steps_per_second": 4.624,
       "step": 25
+    },
+    {
+      "epoch": 0.15083798882681565,
+      "grad_norm": NaN,
+      "learning_rate": 6.167226819279528e-05,
+      "loss": 0.0,
+      "step": 27
+    },
+    {
+      "epoch": 0.16759776536312848,
+      "grad_norm": NaN,
+      "learning_rate": 5e-05,
+      "loss": 0.0,
+      "step": 30
+    },
+    {
+      "epoch": 0.16759776536312848,
+      "eval_loss": NaN,
+      "eval_runtime": 8.2227,
+      "eval_samples_per_second": 36.728,
+      "eval_steps_per_second": 4.621,
+      "step": 30
     }
   ],
   "logging_steps": 3,
@@ -130,7 +152,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6803618267136000.0,
+  "total_flos": 8164341920563200.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null