dima806 committed
Commit b296297
1 Parent(s): 7869331

Upload folder using huggingface_hub

checkpoint-12105/config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "E",
+    "1": "S",
+    "2": "SB"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "E": 0,
+    "S": 1,
+    "SB": 2
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.44.2"
+}
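For reference, a minimal sketch of how a checkpoint carrying the config above is typically consumed at inference time; the local folder name and the input image path are illustrative assumptions, not part of this commit:

# Sketch: load the checkpoint saved in this commit and map logits to the
# E / S / SB labels via id2label. Paths below are assumptions.
from PIL import Image
import torch
from transformers import ViTForImageClassification, ViTImageProcessor

checkpoint = "checkpoint-12105"  # assumed local path to the folder added above
model = ViTForImageClassification.from_pretrained(checkpoint)
processor = ViTImageProcessor.from_pretrained(checkpoint)

image = Image.open("galaxy.jpg").convert("RGB")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # "E", "S", or "SB"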
checkpoint-12105/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b33b2b1054d985b32e22181fbf4e011685b83317ec0dac8a1ee688ac78a0142c
+size 343227052
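The three lines above are not the weights themselves but a Git LFS pointer: the spec version, the SHA-256 of the real blob, and its size in bytes (~343 MB here). The same pointer format applies to the other binary files in this commit. A sketch of fetching the actual file from the Hub with huggingface_hub; the repo_id below is an assumption for illustration:

# Sketch: resolve the LFS pointer by downloading the real safetensors file.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="dima806/galaxy_type_image_detection",  # assumed repo id
    filename="checkpoint-12105/model.safetensors",
)
print(path)  # local cache path of the downloaded ~343 MB weights file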
checkpoint-12105/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53c4939c19e28752212e5ab8c3d2f524ecb2a434c556335f768095d1fc441fdd
+size 686574597
checkpoint-12105/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
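A small sketch of what the settings above amount to in practice: resize to 224x224 with bilinear resampling (resample=2), rescale by 1/255 (0.00392156862745098), then normalize with mean=std=0.5 per channel, mapping pixel values into roughly [-1, 1]. The input image path is an assumption:

# Sketch: build a processor with exactly the values from preprocessor_config.json.
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(
    do_resize=True,
    size={"height": 224, "width": 224},
    resample=2,                # PIL bilinear
    do_rescale=True,
    rescale_factor=1 / 255,
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)

pixel_values = processor(
    Image.open("galaxy.jpg").convert("RGB"),  # hypothetical input image
    return_tensors="pt",
)["pixel_values"]
print(pixel_values.shape)                   # torch.Size([1, 3, 224, 224])
print(pixel_values.min(), pixel_values.max())  # roughly -1.0 and 1.0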
checkpoint-12105/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1134a5d235acceaffa481a3ced304b415607b38245b6bc4dd32ed4b161a60938
+size 14575
checkpoint-12105/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c40ca2519339243454f1747334687ba9e05c256d23ca3fa83abba65e3a91ce6a
+size 627
checkpoint-12105/trainer_state.json ADDED
@@ -0,0 +1,211 @@
+{
+  "best_metric": 0.5624491572380066,
+  "best_model_checkpoint": "galaxy_type_image_detection/checkpoint-12105",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 12105,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.041305245766212306,
+      "grad_norm": 1.0393656492233276,
+      "learning_rate": 8.66403981750311e-07,
+      "loss": 0.967,
+      "step": 500
+    },
+    {
+      "epoch": 0.08261049153242461,
+      "grad_norm": 2.2510411739349365,
+      "learning_rate": 8.2907507258399e-07,
+      "loss": 0.8018,
+      "step": 1000
+    },
+    {
+      "epoch": 0.12391573729863693,
+      "grad_norm": 2.212888479232788,
+      "learning_rate": 7.91746163417669e-07,
+      "loss": 0.7377,
+      "step": 1500
+    },
+    {
+      "epoch": 0.16522098306484923,
+      "grad_norm": 2.6769936084747314,
+      "learning_rate": 7.54417254251348e-07,
+      "loss": 0.6944,
+      "step": 2000
+    },
+    {
+      "epoch": 0.20652622883106153,
+      "grad_norm": 1.5611211061477661,
+      "learning_rate": 7.170883450850269e-07,
+      "loss": 0.6649,
+      "step": 2500
+    },
+    {
+      "epoch": 0.24783147459727387,
+      "grad_norm": 2.753368616104126,
+      "learning_rate": 6.797594359187059e-07,
+      "loss": 0.6495,
+      "step": 3000
+    },
+    {
+      "epoch": 0.28913672036348614,
+      "grad_norm": 2.5022592544555664,
+      "learning_rate": 6.424305267523848e-07,
+      "loss": 0.6282,
+      "step": 3500
+    },
+    {
+      "epoch": 0.33044196612969845,
+      "grad_norm": 3.5701966285705566,
+      "learning_rate": 6.051016175860639e-07,
+      "loss": 0.6171,
+      "step": 4000
+    },
+    {
+      "epoch": 0.37174721189591076,
+      "grad_norm": 2.316419839859009,
+      "learning_rate": 5.677727084197428e-07,
+      "loss": 0.609,
+      "step": 4500
+    },
+    {
+      "epoch": 0.41305245766212306,
+      "grad_norm": 2.764505624771118,
+      "learning_rate": 5.304437992534219e-07,
+      "loss": 0.5999,
+      "step": 5000
+    },
+    {
+      "epoch": 0.4543577034283354,
+      "grad_norm": 3.0616867542266846,
+      "learning_rate": 4.931148900871008e-07,
+      "loss": 0.5925,
+      "step": 5500
+    },
+    {
+      "epoch": 0.49566294919454773,
+      "grad_norm": 3.7850115299224854,
+      "learning_rate": 4.557859809207797e-07,
+      "loss": 0.589,
+      "step": 6000
+    },
+    {
+      "epoch": 0.53696819496076,
+      "grad_norm": 2.5644021034240723,
+      "learning_rate": 4.1845707175445867e-07,
+      "loss": 0.5829,
+      "step": 6500
+    },
+    {
+      "epoch": 0.5782734407269723,
+      "grad_norm": 2.6693475246429443,
+      "learning_rate": 3.8112816258813767e-07,
+      "loss": 0.5768,
+      "step": 7000
+    },
+    {
+      "epoch": 0.6195786864931846,
+      "grad_norm": 4.514222145080566,
+      "learning_rate": 3.437992534218167e-07,
+      "loss": 0.5728,
+      "step": 7500
+    },
+    {
+      "epoch": 0.6608839322593969,
+      "grad_norm": 3.1888978481292725,
+      "learning_rate": 3.064703442554956e-07,
+      "loss": 0.5698,
+      "step": 8000
+    },
+    {
+      "epoch": 0.7021891780256092,
+      "grad_norm": 3.4488654136657715,
+      "learning_rate": 2.691414350891746e-07,
+      "loss": 0.5737,
+      "step": 8500
+    },
+    {
+      "epoch": 0.7434944237918215,
+      "grad_norm": 4.471464157104492,
+      "learning_rate": 2.3181252592285358e-07,
+      "loss": 0.5643,
+      "step": 9000
+    },
+    {
+      "epoch": 0.7847996695580338,
+      "grad_norm": 3.535975217819214,
+      "learning_rate": 1.9448361675653255e-07,
+      "loss": 0.5682,
+      "step": 9500
+    },
+    {
+      "epoch": 0.8261049153242461,
+      "grad_norm": 5.046966075897217,
+      "learning_rate": 1.571547075902115e-07,
+      "loss": 0.5628,
+      "step": 10000
+    },
+    {
+      "epoch": 0.8674101610904585,
+      "grad_norm": 3.240630626678467,
+      "learning_rate": 1.198257984238905e-07,
+      "loss": 0.5613,
+      "step": 10500
+    },
+    {
+      "epoch": 0.9087154068566708,
+      "grad_norm": 4.973729133605957,
+      "learning_rate": 8.249688925756947e-08,
+      "loss": 0.5602,
+      "step": 11000
+    },
+    {
+      "epoch": 0.9500206526228832,
+      "grad_norm": 4.114513397216797,
+      "learning_rate": 4.516798009124844e-08,
+      "loss": 0.5622,
+      "step": 11500
+    },
+    {
+      "epoch": 0.9913258983890955,
+      "grad_norm": 3.011930227279663,
+      "learning_rate": 7.839070924927416e-09,
+      "loss": 0.5592,
+      "step": 12000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7654878108598617,
+      "eval_loss": 0.5624491572380066,
+      "eval_model_preparation_time": 0.006,
+      "eval_runtime": 413.2412,
+      "eval_samples_per_second": 98.669,
+      "eval_steps_per_second": 3.085,
+      "step": 12105
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 12105,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 6.003272999208671e+19,
+  "train_batch_size": 64,
+  "trial_name": null,
+  "trial_params": null
+}
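The trainer state above records 24 training-loss points (logged every 500 steps, loss falling from 0.967 to 0.5592) and one end-of-epoch evaluation with accuracy of about 0.765. A brief sketch of pulling those numbers back out of the file; the local path is an assumption:

# Sketch: inspect the log history stored in trainer_state.json above.
import json

with open("checkpoint-12105/trainer_state.json") as f:  # assumed local path
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best metric (eval_loss):  {state['best_metric']:.4f}")
print(f"final training loss:      {train_logs[-1]['loss']:.4f}")
print(f"eval accuracy at epoch 1: {eval_logs[-1]['eval_accuracy']:.4f}")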
checkpoint-12105/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6dc03e977829a9fe1632bfde9dc8286b9b4d9e269bca46e17c31045d8f211d2
+size 4667
config.json CHANGED
@@ -9,17 +9,17 @@
   "hidden_dropout_prob": 0.0,
   "hidden_size": 768,
   "id2label": {
-    "0": "S",
-    "1": "SB",
-    "2": "E"
+    "0": "E",
+    "1": "S",
+    "2": "SB"
   },
   "image_size": 224,
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "label2id": {
-    "E": 2,
-    "S": 0,
-    "SB": 1
+    "E": 0,
+    "S": 1,
+    "SB": 2
   },
   "layer_norm_eps": 1e-12,
   "model_type": "vit",
@@ -30,5 +30,5 @@
   "problem_type": "single_label_classification",
   "qkv_bias": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.32.1"
+  "transformers_version": "4.44.2"
 }
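The change above reorders the class mapping so that ids are assigned alphabetically (E→0, S→1, SB→2) and keeps label2id in sync, besides bumping the transformers version. A quick sketch to confirm the two mappings remain mutual inverses after the edit; the config path is an assumption:

# Sketch: check that id2label and label2id agree in the updated config.
from transformers import ViTConfig

config = ViTConfig.from_pretrained("checkpoint-12105")  # assumed local path
assert all(config.label2id[label] == idx for idx, label in config.id2label.items())
print(config.id2label)  # expected: {0: 'E', 1: 'S', 2: 'SB'}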
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:565bb0644333a10c5d28c3cf40111f193c66d72510527072d071b50f07adca81
+oid sha256:b33b2b1054d985b32e22181fbf4e011685b83317ec0dac8a1ee688ac78a0142c
 size 343227052
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:84330c3b6e1bac0306f601121d873a8d06d7a1eac19d3e58de78cde0aa8316db
-size 4027
+oid sha256:b6dc03e977829a9fe1632bfde9dc8286b9b4d9e269bca46e17c31045d8f211d2
+size 4667