VERSIL91 committed on
Commit 0d04e3a
1 Parent(s): 72e84f3

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f8e55c3be663cea5f316f75b26d0b9f2b186eb2a3a48a5830b922e95b7bc1176
+oid sha256:44caaf7ef7e33ddb52bdac5c015394aad33dc90cebb3fd594f8f17485acdc313
 size 50624
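
The files in this commit are stored with Git LFS, so the diff only touches the three-line pointer (spec version, sha256 oid, byte size); here the adapter weights changed while the size stayed at 50624 bytes. A minimal sketch of checking a fetched blob against such a pointer is shown below; the local paths are illustrative, not part of this repo.

# Sketch: verify a downloaded file against its Git LFS pointer.
# The pointer stores only the blob's sha256 oid and byte size.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer into a dict."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    fields = parse_lfs_pointer(Path(pointer_path).read_text())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Example with hypothetical local paths:
# verify_blob("last-checkpoint/adapter_model.safetensors",  # pointer as checked out without LFS
#             "/tmp/adapter_model.safetensors")             # the actual 50624-byte blob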
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d58357a799aa2bc8139fdedce4aa015f85c4e38ba39a5605316c3fa05595aee3
+oid sha256:ac022dbb3fc2d99a24d41479434566b8564cc2d853b691e27319d1227d89ee6e
 size 111142
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59b1d024865d1dba4562580f93959a5a2b1a3bf90213c135f90f60d80b11a78f
+oid sha256:208c51ec2766c73fe42741b329e4379bc0b0f1175f703ac486a32838bf514b13
 size 15984
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:deb5f3237ae88e14981a5756b110880ad688694abd0ba11e55e250f561e35622
+oid sha256:3e1bff8b595773356fdc187f56d5a0c0121d69fdacf1e76950fba64dacfd5fc3
 size 15984
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:003ef98a2b528861f4d1487387f772d8aae3ed53d6011e15552c6470cc539ea0
+oid sha256:5b8c9e9e3a6b690529f7d95e945c2b416648318213cfc38263c918379e5990e2
 size 15984
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef69ffd171970264b348a13a56d94173b99be6aba5621fe1853099ef1072f676
+oid sha256:3eb7fbfe916e55d37e1bd35f58e8ece0a1823cf09f502145e19e13903c1c78ef
 size 15984
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ceaad68722e6ed00d9beef3e50e10f519480af690b63e499fc9f8f89515238f0
+oid sha256:829f7d98c51d3dce78cbe15c038b6184a0ffadc9bc4e660d07cdee375d9e34c1
 size 15984
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa0bb24fa34cfe9d00b78207938758f5303233d6bbff724241e845b94631cc63
+oid sha256:d7ede1fa0bb4fcf80064a832977f960fd7cf29d0059e918b0611ce520d7d52de
 size 15984
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:454e536cf797be7dff220575f47b93c8cc77ccc1c55d5df7d2ad74af6d2539ec
+oid sha256:dbe41f49e8a4d0757f45695e57ded169e0314da0412d34517669df2113c9304c
 size 15984
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e2a659e458bf3cb1ed92f049ddb671d64664d3fce5039b5ec93eee2595122a80
+oid sha256:ed52cd9a3377c33a6718ff5e7c1e8c573d9b4931bb139825840f14ccdc01ba5b
 size 15984
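
The eight rng_state_0.pth through rng_state_7.pth files suggest an eight-process (e.g. 8-GPU) run, with each rank saving its own RNG snapshot. The sketch below illustrates the idea of per-rank RNG checkpointing with PyTorch under that assumption; it is not the exact code that produced these files.

# Sketch: save/restore one RNG snapshot per rank, mirroring rng_state_{rank}.pth.
import random
import numpy as np
import torch

def save_rng_state(rank: int, out_dir: str = "last-checkpoint") -> None:
    state = {
        "python": random.getstate(),
        "numpy": np.random.get_state(),
        "cpu": torch.random.get_rng_state(),
    }
    if torch.cuda.is_available():
        state["cuda"] = torch.cuda.get_rng_state_all()
    torch.save(state, f"{out_dir}/rng_state_{rank}.pth")

def load_rng_state(rank: int, out_dir: str = "last-checkpoint") -> None:
    state = torch.load(f"{out_dir}/rng_state_{rank}.pth")
    random.setstate(state["python"])
    np.random.set_state(state["numpy"])
    torch.random.set_rng_state(state["cpu"])
    if "cuda" in state and torch.cuda.is_available():
        torch.cuda.set_rng_state_all(state["cuda"])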
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083
+oid sha256:532138a5ca880d8da393ae449e5715b2766def36b8838785ca08d07228b119b7
 size 1064
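
Together these files form a typical training-checkpoint directory: adapter weights, optimizer and scheduler state, per-rank RNG state, and trainer_state.json. Once the LFS blobs have been fetched (for example with git lfs pull), the updated adapter weights can be inspected directly; a small sketch is below, using the checkpoint path from this commit.

# Sketch: list the adapter tensors referenced by the new pointer.
# Requires the real 50624-byte blob, not just the LFS pointer file.
from safetensors.torch import load_file

tensors = load_file("last-checkpoint/adapter_model.safetensors")
for name, tensor in tensors.items():
    print(f"{name}: shape={tuple(tensor.shape)} dtype={tensor.dtype}")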
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0881057268722467,
+  "epoch": 0.11747430249632893,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -144,6 +144,49 @@
       "eval_samples_per_second": 796.022,
       "eval_steps_per_second": 99.936,
       "step": 15
+    },
+    {
+      "epoch": 0.09397944199706314,
+      "grad_norm": 0.07468053698539734,
+      "learning_rate": 3.4549150281252636e-05,
+      "loss": 10.3794,
+      "step": 16
+    },
+    {
+      "epoch": 0.09985315712187959,
+      "grad_norm": 0.07500291615724564,
+      "learning_rate": 2.061073738537635e-05,
+      "loss": 10.3783,
+      "step": 17
+    },
+    {
+      "epoch": 0.10572687224669604,
+      "grad_norm": 0.07731679826974869,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 10.3793,
+      "step": 18
+    },
+    {
+      "epoch": 0.11160058737151249,
+      "grad_norm": 0.07920215278863907,
+      "learning_rate": 2.4471741852423237e-06,
+      "loss": 10.3792,
+      "step": 19
+    },
+    {
+      "epoch": 0.11747430249632893,
+      "grad_norm": 0.07347231358289719,
+      "learning_rate": 0.0,
+      "loss": 10.3791,
+      "step": 20
+    },
+    {
+      "epoch": 0.11747430249632893,
+      "eval_loss": NaN,
+      "eval_runtime": 1.4584,
+      "eval_samples_per_second": 786.475,
+      "eval_steps_per_second": 98.738,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -158,12 +201,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 25105174364160.0,
+  "total_flos": 33473565818880.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null