dzanbek committed
Commit b11996e
Parent: e3c6ca6

Training in progress, step 10, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:495d3244df0d5c425b1057ec38c0263268b229bbab30df9f00d22f3106346f3e
+oid sha256:82eaf68cb2b7738118e0fe2db0a695219566f54d44c7b0fbaf0cb57dee99cf8d
 size 80013120
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a99b509f62716c94c46b0bbfb1991353248b4bfc2c7e8b697ad5f0af49f7bb22
+oid sha256:1f1bcfe34c8daeff7460447a9d3a8946825d8dab5df0c066b20d6d05d485cdbf
 size 160284754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1da65c6fd85f4aa14aa34f294c78fa464be27f768771994054ee1aa907752aba
+oid sha256:4555e4c65e128e3c73410113c546065354d2476ccf2887955b4785922a9a9a7c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab098d97568c94b26c087e3ff4fd649c3aaa775049e6dbf18e927dde2c5feee8
+oid sha256:06829946ff807c7143728448cc1bf384944e638865601317db4b29a92003bb11
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.010040160642570281,
+  "epoch": 0.020080321285140562,
   "eval_steps": 2,
-  "global_step": 5,
+  "global_step": 10,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -66,6 +66,65 @@
       "learning_rate": 0.0001,
       "loss": 5.6022,
       "step": 5
+    },
+    {
+      "epoch": 0.012048192771084338,
+      "grad_norm": 1.1300783157348633,
+      "learning_rate": 9.755282581475769e-05,
+      "loss": 6.2136,
+      "step": 6
+    },
+    {
+      "epoch": 0.012048192771084338,
+      "eval_loss": 1.5139317512512207,
+      "eval_runtime": 24.4198,
+      "eval_samples_per_second": 8.6,
+      "eval_steps_per_second": 4.3,
+      "step": 6
+    },
+    {
+      "epoch": 0.014056224899598393,
+      "grad_norm": 0.7629399299621582,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 5.591,
+      "step": 7
+    },
+    {
+      "epoch": 0.01606425702811245,
+      "grad_norm": 0.991378664970398,
+      "learning_rate": 7.938926261462366e-05,
+      "loss": 5.6169,
+      "step": 8
+    },
+    {
+      "epoch": 0.01606425702811245,
+      "eval_loss": 1.4971964359283447,
+      "eval_runtime": 24.1606,
+      "eval_samples_per_second": 8.692,
+      "eval_steps_per_second": 4.346,
+      "step": 8
+    },
+    {
+      "epoch": 0.018072289156626505,
+      "grad_norm": 1.3900601863861084,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 5.0918,
+      "step": 9
+    },
+    {
+      "epoch": 0.020080321285140562,
+      "grad_norm": 1.6595548391342163,
+      "learning_rate": 5e-05,
+      "loss": 5.4715,
+      "step": 10
+    },
+    {
+      "epoch": 0.020080321285140562,
+      "eval_loss": 1.4780693054199219,
+      "eval_runtime": 24.0817,
+      "eval_samples_per_second": 8.72,
+      "eval_steps_per_second": 4.36,
+      "step": 10
     }
   ],
   "logging_steps": 1,
@@ -85,7 +144,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1791624166244352.0,
+  "total_flos": 3501810870386688.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null