Commit 81d78b2 by genejalston
Parent: 0b37129

Training in progress, step 4500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:045c16e47039390d0e2734471cdf7746fb258c03504e612e02a3fec9c1b52ba8
+oid sha256:eb6a3997803eebdd2cab8b05d3e11fc7b1b5e72b059d14b88afbc5e9d2e960a2
 size 1625426996
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f212138839ebf6d65b023eb6f28323e060842ac2ac6a20ac36fe0e3c3072de58
+oid sha256:0659d84d42f67f7d0614c9db8ed7e1b90dc5467ce646c45a244117e0d8b281e6
 size 3250759951
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f8b4ea2c4c54d35eeb98e5de64c587665341689d2da447151bedb693d9c7d031
+oid sha256:10d14f7de59f48f4ade3e9464aacd8c0fa09164a39807a7c587e8743b637b06e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b2ce6403fa18d43c8c37eb8b7614dadb54a09efc4dec6faa2966730a5140c780
+oid sha256:6032f9ebe8c8c14bc64b0775131b92bc24d785e7deee32c5a13741aeca0eff6f
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.08727336198808719,
+  "epoch": 0.09818253223659809,
   "eval_steps": 500,
-  "global_step": 4000,
+  "global_step": 4500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -55,13 +55,19 @@
       "learning_rate": 1.941958993083083e-05,
       "loss": 0.0909,
       "step": 4000
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 1.934685683945625e-05,
+      "loss": 0.1167,
+      "step": 4500
     }
   ],
   "logging_steps": 500,
   "max_steps": 137499,
   "num_train_epochs": 3,
   "save_steps": 500,
-  "total_flos": 6772201881600000.0,
+  "total_flos": 7618727116800000.0,
   "trial_name": null,
   "trial_params": null
 }
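
The files in this commit (model.safetensors, optimizer.pt, scheduler.pt, rng_state.pth, trainer_state.json) are what the Hugging Face transformers Trainer writes when it saves a checkpoint. Below is a minimal sketch of resuming training from such a checkpoint; the model class, model name, dataset, and most TrainingArguments are hypothetical placeholders and are not recorded in this diff — only the step/epoch counters and the resume path refer to the files above.

# Minimal resume sketch. Assumptions: a standard transformers Trainer setup;
# "base-model" and train_dataset are hypothetical placeholders, not taken from
# this repository.
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

model = AutoModelForCausalLM.from_pretrained("base-model")  # hypothetical base model

args = TrainingArguments(
    output_dir="checkpoints",  # parent directory that holds last-checkpoint/
    num_train_epochs=3,        # matches num_train_epochs in trainer_state.json
    save_steps=500,            # matches save_steps in trainer_state.json
    logging_steps=500,         # matches logging_steps in trainer_state.json
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,  # hypothetical: your tokenized training split
)

# Trainer.train(resume_from_checkpoint=...) reloads the model weights plus the
# optimizer, scheduler, and RNG state and the trainer_state.json shown above,
# so training continues from global_step 4500 (epoch ~0.098) instead of step 0.
trainer.train(resume_from_checkpoint="last-checkpoint")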