dimasik87 committed
Commit c4b4e8b · verified · 1 Parent(s): 85b247c

Training in progress, step 16, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6240728159d513faa05abf6b5d7ed2406f17cba8a6ca15979a18ce62688bde0f
+oid sha256:3923c6c2d238cd3361024c80cac4a57bea4c1dd24732d212c7c0481b868aeeb4
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3647ec73c1e57bf25af24da7e11057a0a6dc36496110229d4e9f1d78ece690b
+oid sha256:ca49e2c253d38e24ae4972e1bc709e59f2e1296805f1052620ce8f42596247ac
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a9273107ed22f2b0460ae677b61e5921af160c5c9d230441f93d948a850671aa
+oid sha256:e841c8d708d2d450b0eab7be6e2c4e08cbedd4e297878439ca4360fd0b733323
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68888158764ed5e658b457a541f86335ea31432325308674d2962aa98e037fa4
+oid sha256:5b52924c88c1c80163d374a6650253dd74b8b46052a3da6fc5e4076e57b5effa
 size 1064
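
The four binary files above are stored as Git LFS pointers, so the diff only records the new oid sha256 and size of each payload. As a minimal sketch of how a pulled file can be checked against its pointer (assuming the repository was cloned and fetched with git lfs pull; the path and expected values below are taken from the adapter pointer above):

import hashlib
import os

# Check that a locally pulled file matches its Git LFS pointer.
# Path and expected values come from the adapter_model.safetensors pointer
# above; adjust them for the other checkpoint files as needed.
path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "3923c6c2d238cd3361024c80cac4a57bea4c1dd24732d212c7c0481b868aeeb4"
expected_size = 167832240

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size differs from LFS pointer"
assert digest.hexdigest() == expected_oid, "sha256 differs from LFS pointer"
print("pointer and file agree")
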
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.003593083314619358,
+  "epoch": 0.00479077775282581,
   "eval_steps": 4,
-  "global_step": 12,
+  "global_step": 16,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -123,6 +123,42 @@
       "eval_samples_per_second": 8.264,
       "eval_steps_per_second": 8.264,
       "step": 12
+    },
+    {
+      "epoch": 0.003892506924170971,
+      "grad_norm": 17.937646865844727,
+      "learning_rate": 0.00019723699203976766,
+      "loss": 4.7382,
+      "step": 13
+    },
+    {
+      "epoch": 0.004191930533722584,
+      "grad_norm": 21.441980361938477,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 5.3976,
+      "step": 14
+    },
+    {
+      "epoch": 0.0044913541432741975,
+      "grad_norm": 32.05086898803711,
+      "learning_rate": 0.0001923879532511287,
+      "loss": 3.6275,
+      "step": 15
+    },
+    {
+      "epoch": 0.00479077775282581,
+      "grad_norm": 43.66167449951172,
+      "learning_rate": 0.0001891006524188368,
+      "loss": 2.7349,
+      "step": 16
+    },
+    {
+      "epoch": 0.00479077775282581,
+      "eval_loss": 4.855716705322266,
+      "eval_runtime": 83.6415,
+      "eval_samples_per_second": 8.417,
+      "eval_steps_per_second": 8.417,
+      "step": 16
     }
   ],
   "logging_steps": 1,
@@ -142,7 +178,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4451323701362688.0,
+  "total_flos": 5935098268483584.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null