kevinoli committed on
Commit cf9f9b6
1 Parent(s): de0ea21

Training in progress, step 4000, checkpoint

checkpoint-4000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d32326140983fe06df8a4feebe9254db82fddb9e5b81e5fc2852cf4950ae893e
+oid sha256:79156abefde4e4882460600714ae0567406deac99590a0ff234ba1048e2f2339
 size 1711848436
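Each changed file above and below is a Git LFS pointer: the repository tracks only the `oid sha256:` digest and the byte `size`, while the actual tensor data lives in LFS storage. A minimal sketch of checking a downloaded copy against its pointer, using the digest and size from this diff (the local path is an assumption for illustration):

```python
import hashlib
import os

# Values copied from the new LFS pointer for checkpoint-4000/model.safetensors
EXPECTED_OID = "79156abefde4e4882460600714ae0567406deac99590a0ff234ba1048e2f2339"
EXPECTED_SIZE = 1711848436

# Hypothetical local path to the downloaded checkpoint file
path = "checkpoint-4000/model.safetensors"

# Cheap check first: on-disk size must match the pointer's declared size
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

# Stream the file through SHA-256 so a ~1.7 GB checkpoint is never fully in memory
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches its LFS pointer")
```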
checkpoint-4000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:537ddc2ec13ff07e50acd17af98e66d697c093e9931cb5f6975b703e1c8246e7
+oid sha256:cc99807d330457613b7b4fbcc2866880b518f725723ad1f96fed56361fa06c7f
 size 3424043887
checkpoint-4000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:90a1b90840e0ce94ab1e7091ab856cfbbbce2e497912ae668cbbf9a86e863b92
+oid sha256:3fede1d7806deae69687da3edc51ede9ae3128bb8735c897888c46d7a052e8dd
 size 623
checkpoint-4000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.6952972412109375,
+  "best_metric": 0.7490401864051819,
   "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e4l57-l/checkpoint-4000",
-  "epoch": 0.7369196757553427,
+  "epoch": 0.4502983226387482,
   "eval_steps": 500,
   "global_step": 4000,
   "is_hyper_param_search": false,
@@ -9,128 +9,128 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.09211495946941783,
-      "grad_norm": 3.0293657779693604,
-      "learning_rate": 4.884856300663227e-07,
-      "loss": 0.271,
+      "epoch": 0.056287290329843524,
+      "grad_norm": 150.1642303466797,
+      "learning_rate": 4.929640887087696e-07,
+      "loss": 0.3486,
       "step": 500
     },
     {
-      "epoch": 0.09211495946941783,
-      "eval_loss": 1.069305419921875,
-      "eval_runtime": 75.1409,
-      "eval_samples_per_second": 16.063,
-      "eval_steps_per_second": 2.01,
+      "epoch": 0.056287290329843524,
+      "eval_loss": 1.1265727281570435,
+      "eval_runtime": 123.7806,
+      "eval_samples_per_second": 15.948,
+      "eval_steps_per_second": 1.995,
       "step": 500
     },
     {
-      "epoch": 0.18422991893883567,
-      "grad_norm": 7.3040852546691895,
-      "learning_rate": 4.769712601326456e-07,
-      "loss": 0.2493,
+      "epoch": 0.11257458065968705,
+      "grad_norm": 366.6913146972656,
+      "learning_rate": 4.859281774175391e-07,
+      "loss": 0.2733,
       "step": 1000
     },
     {
-      "epoch": 0.18422991893883567,
-      "eval_loss": 0.9427077770233154,
-      "eval_runtime": 74.8926,
-      "eval_samples_per_second": 16.116,
-      "eval_steps_per_second": 2.016,
+      "epoch": 0.11257458065968705,
+      "eval_loss": 0.9742079377174377,
+      "eval_runtime": 125.8047,
+      "eval_samples_per_second": 15.691,
+      "eval_steps_per_second": 1.963,
       "step": 1000
     },
     {
-      "epoch": 0.2763448784082535,
-      "grad_norm": 0.020736657083034515,
-      "learning_rate": 4.654568901989683e-07,
-      "loss": 0.2348,
+      "epoch": 0.16886187098953057,
+      "grad_norm": 555.5016479492188,
+      "learning_rate": 4.788922661263087e-07,
+      "loss": 0.1851,
       "step": 1500
     },
     {
-      "epoch": 0.2763448784082535,
-      "eval_loss": 0.8726764917373657,
-      "eval_runtime": 76.6621,
-      "eval_samples_per_second": 15.744,
-      "eval_steps_per_second": 1.97,
+      "epoch": 0.16886187098953057,
+      "eval_loss": 0.9162159562110901,
+      "eval_runtime": 126.7606,
+      "eval_samples_per_second": 15.573,
+      "eval_steps_per_second": 1.949,
       "step": 1500
     },
     {
-      "epoch": 0.36845983787767134,
-      "grad_norm": 5.3738909628009424e-05,
-      "learning_rate": 4.5394252026529107e-07,
-      "loss": 0.1552,
+      "epoch": 0.2251491613193741,
+      "grad_norm": 0.423260897397995,
+      "learning_rate": 4.7185635483507824e-07,
+      "loss": 0.1973,
       "step": 2000
     },
     {
-      "epoch": 0.36845983787767134,
-      "eval_loss": 0.8325753808021545,
-      "eval_runtime": 76.6127,
-      "eval_samples_per_second": 15.755,
-      "eval_steps_per_second": 1.971,
+      "epoch": 0.2251491613193741,
+      "eval_loss": 0.8716973662376404,
+      "eval_runtime": 127.6955,
+      "eval_samples_per_second": 15.459,
+      "eval_steps_per_second": 1.934,
       "step": 2000
     },
     {
-      "epoch": 0.46057479734708917,
-      "grad_norm": 404.510009765625,
-      "learning_rate": 4.4242815033161386e-07,
-      "loss": 0.1753,
+      "epoch": 0.2814364516492176,
+      "grad_norm": 6.840139389038086,
+      "learning_rate": 4.6482044354384774e-07,
+      "loss": 0.1881,
       "step": 2500
     },
     {
-      "epoch": 0.46057479734708917,
-      "eval_loss": 0.7550467848777771,
-      "eval_runtime": 76.8024,
-      "eval_samples_per_second": 15.716,
-      "eval_steps_per_second": 1.966,
+      "epoch": 0.2814364516492176,
+      "eval_loss": 0.830047070980072,
+      "eval_runtime": 128.0771,
+      "eval_samples_per_second": 15.413,
+      "eval_steps_per_second": 1.929,
       "step": 2500
     },
     {
-      "epoch": 0.552689756816507,
-      "grad_norm": 370.7579650878906,
-      "learning_rate": 4.3091378039793665e-07,
-      "loss": 0.1659,
+      "epoch": 0.33772374197906113,
+      "grad_norm": 729.8377685546875,
+      "learning_rate": 4.577845322526173e-07,
+      "loss": 0.1695,
       "step": 3000
     },
     {
-      "epoch": 0.552689756816507,
-      "eval_loss": 0.719167172908783,
-      "eval_runtime": 76.6327,
-      "eval_samples_per_second": 15.75,
-      "eval_steps_per_second": 1.97,
+      "epoch": 0.33772374197906113,
+      "eval_loss": 0.8265627026557922,
+      "eval_runtime": 128.052,
+      "eval_samples_per_second": 15.416,
+      "eval_steps_per_second": 1.929,
       "step": 3000
     },
     {
-      "epoch": 0.6448047162859248,
-      "grad_norm": 24.97766876220703,
-      "learning_rate": 4.193994104642594e-07,
-      "loss": 0.105,
+      "epoch": 0.39401103230890466,
+      "grad_norm": 7.96999938756926e-06,
+      "learning_rate": 4.507486209613869e-07,
+      "loss": 0.155,
       "step": 3500
     },
     {
-      "epoch": 0.6448047162859248,
-      "eval_loss": 0.7117515802383423,
-      "eval_runtime": 77.1286,
-      "eval_samples_per_second": 15.649,
-      "eval_steps_per_second": 1.958,
+      "epoch": 0.39401103230890466,
+      "eval_loss": 0.7721038460731506,
+      "eval_runtime": 128.1871,
+      "eval_samples_per_second": 15.399,
+      "eval_steps_per_second": 1.927,
       "step": 3500
     },
     {
-      "epoch": 0.7369196757553427,
-      "grad_norm": 305.62060546875,
-      "learning_rate": 4.0788504053058217e-07,
-      "loss": 0.1336,
+      "epoch": 0.4502983226387482,
+      "grad_norm": 0.006555848754942417,
+      "learning_rate": 4.4371270967015645e-07,
+      "loss": 0.1216,
       "step": 4000
     },
     {
-      "epoch": 0.7369196757553427,
-      "eval_loss": 0.6952972412109375,
-      "eval_runtime": 76.7253,
-      "eval_samples_per_second": 15.731,
-      "eval_steps_per_second": 1.968,
+      "epoch": 0.4502983226387482,
+      "eval_loss": 0.7490401864051819,
+      "eval_runtime": 127.5516,
+      "eval_samples_per_second": 15.476,
+      "eval_steps_per_second": 1.936,
       "step": 4000
     }
   ],
   "logging_steps": 500,
-  "max_steps": 21712,
+  "max_steps": 35532,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 4,
   "save_steps": 500,
checkpoint-4000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf76416e4a857628175385db7f617e54451cd7ffb62c1f9590ee9f30268f8c5b
+oid sha256:319be2a769ef7117b6826908976f080f9082f159f4f0ad75da1a151ad29d9bff
 size 4847