Nexspear committed (verified)
Commit: a9d8a18
Parent: 88a4ad7

Training in progress, step 25, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f685d1b73db37255c28b0937b165cf8eafb0099434ebb5026f9d5f086049f370
+oid sha256:c53254f7ea39fe8f31247ca88cb1caeabdacc9a6e1133aeda2a0e5a64a20f392
 size 156926880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:85ccbe8f52b64656a2fae23bebc063ec4f55dfced1911be4f5675713b7a3b725
+oid sha256:3cfe9e8e0441d1088dc84ff17fd775ff5fe66a0f007e919d9ba407a9f990ffd0
 size 79968772
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb6cd3789a603cc9db1f0b416612fba074ab96fe35f4ad309fac449135e2f22d
+oid sha256:8c620901f4f18db9d2766b73b498690238d01bc9e321c63a96cb81351dc958c9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:02d97e2af7e51ba89efcffc7f2a661ca0885e61db7f78987db5df30514ab62df
+oid sha256:6676fe28230ae15b45fb334c871c6fdf1a7984a935952b9f8650896c37a8c106
 size 1064
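
The four files above are Git LFS pointer files: each records only the sha256 of the underlying binary (oid) and its size in bytes, while the blob itself lives in LFS storage, so this checkpoint commit swaps the oids and leaves the sizes unchanged. As a minimal sketch of how the new pointers could be checked against the materialized files (the verify_lfs_blob helper and the local paths are illustrative, not part of this repository, and assume the checkpoint has been downloaded into last-checkpoint/):

import hashlib, os

# Expected oid/size pairs taken from the updated pointer files in this commit.
EXPECTED = {
    "last-checkpoint/adapter_model.safetensors":
        ("c53254f7ea39fe8f31247ca88cb1caeabdacc9a6e1133aeda2a0e5a64a20f392", 156926880),
    "last-checkpoint/optimizer.pt":
        ("3cfe9e8e0441d1088dc84ff17fd775ff5fe66a0f007e919d9ba407a9f990ffd0", 79968772),
    "last-checkpoint/rng_state.pth":
        ("8c620901f4f18db9d2766b73b498690238d01bc9e321c63a96cb81351dc958c9", 14244),
    "last-checkpoint/scheduler.pt":
        ("6676fe28230ae15b45fb334c871c6fdf1a7984a935952b9f8650896c37a8c106", 1064),
}

def verify_lfs_blob(path, expected_oid, expected_size):
    """Hash the materialized file in chunks and compare it to the pointer's oid/size."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

for path, (oid, size) in EXPECTED.items():
    print(path, "ok" if verify_lfs_blob(path, oid, size) else "MISMATCH")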
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03759398496240601,
+  "epoch": 0.046992481203007516,
   "eval_steps": 5,
-  "global_step": 20,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -89,6 +89,28 @@
       "eval_samples_per_second": 27.926,
       "eval_steps_per_second": 6.982,
       "step": 20
+    },
+    {
+      "epoch": 0.039473684210526314,
+      "grad_norm": 1.2767959833145142,
+      "learning_rate": 8.247240241650918e-05,
+      "loss": 1.724,
+      "step": 21
+    },
+    {
+      "epoch": 0.045112781954887216,
+      "grad_norm": 1.445420265197754,
+      "learning_rate": 7.269952498697734e-05,
+      "loss": 1.7438,
+      "step": 24
+    },
+    {
+      "epoch": 0.046992481203007516,
+      "eval_loss": 1.6554962396621704,
+      "eval_runtime": 16.0491,
+      "eval_samples_per_second": 27.914,
+      "eval_steps_per_second": 6.979,
+      "step": 25
     }
   ],
   "logging_steps": 3,
@@ -108,7 +130,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3973659450408960.0,
+  "total_flos": 4967074313011200.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null