Upload folder using huggingface_hub
- README.md +42 -0
- checkpoint-5/config.json +41 -0
- checkpoint-5/model.safetensors +3 -0
- checkpoint-5/optimizer.pt +3 -0
- checkpoint-5/rng_state.pth +3 -0
- checkpoint-5/scheduler.pt +3 -0
- checkpoint-5/trainer_state.json +69 -0
- checkpoint-5/training_args.bin +3 -0
- config.json +41 -0
- model.safetensors +3 -0
- preprocessor_config.json +22 -0
- training_args.bin +3 -0
- training_params.json +29 -0
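
A commit like this can be reproduced with `huggingface_hub`'s `upload_folder`. A minimal sketch, assuming authentication is already set up (e.g. via `huggingface-cli login`); the folder path and repo id are taken from `training_params.json` below:

```python
# Sketch: reproduce this commit with the standard upload_folder API.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/tmp/model",                 # AutoTrain output dir, per training_params.json
    repo_id="tangfei/autotrain-fo40u-s5iv3",  # repo_id, per training_params.json
    commit_message="Upload folder using huggingface_hub",
)
```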
README.md
ADDED
@@ -0,0 +1,42 @@
+
+---
+tags:
+- autotrain
+- image-classification
+widget:
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+  example_title: Tiger
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+  example_title: Teapot
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+  example_title: Palace
+datasets:
+- tangfei/autotrain-data-autotrain-fo40u-s5iv3
+---
+
+# Model Trained Using AutoTrain
+
+- Problem type: Image Classification
+
+## Validation Metrics
+loss: nan
+
+f1_macro: 0.03333333333333333
+
+f1_micro: 0.1111111111111111
+
+f1_weighted: 0.02222222222222222
+
+precision_macro: 0.018518518518518517
+
+precision_micro: 0.1111111111111111
+
+precision_weighted: 0.012345679012345678
+
+recall_macro: 0.16666666666666666
+
+recall_micro: 0.1111111111111111
+
+recall_weighted: 0.1111111111111111
+
+accuracy: 0.1111111111111111
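
The uploaded model can be exercised with the stock `transformers` image-classification pipeline; note that the NaN validation loss and near-chance scores above suggest this particular run did not converge, so treat predictions accordingly. A minimal sketch, using the repo id from `training_params.json` and one of the widget images:

```python
# Sketch: run the model on the card's sample tiger image.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="tangfei/autotrain-fo40u-s5iv3",  # repo_id from training_params.json
)
preds = classifier(
    "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
)
print(preds)  # list of {'label': ..., 'score': ...}; labels are a..f per config.json
```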
checkpoint-5/config.json
ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "google/vit-large-patch16-224",
+  "_num_labels": 6,
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "a",
+    "1": "b",
+    "2": "c",
+    "3": "d",
+    "4": "e",
+    "5": "f"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "a": 0,
+    "b": 1,
+    "c": 2,
+    "d": 3,
+    "e": 4,
+    "f": 5
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.1"
+}
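
The config can also be inspected programmatically with the standard `AutoConfig` API; a short sketch (repo id assumed from `training_params.json`):

```python
# Sketch: load the config and check the label mapping and model size.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("tangfei/autotrain-fo40u-s5iv3")
print(config.model_type)                             # "vit"
print(config.id2label)                               # {0: 'a', ..., 5: 'f'}
print(config.hidden_size, config.num_hidden_layers)  # 1024, 24 -> ViT-Large
```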
checkpoint-5/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd0a29f9734bcd496263af309d84d89c0c5664d27f20a25881e215ac2a1746c8
+size 1213277672
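
What git stores here is only a Git LFS pointer: the spec version, the SHA-256 of the real file, and its size in bytes; the ~1.2 GB of weights live in LFS storage. A sketch that downloads the actual file and verifies it against the pointer (the repo id is assumed from `training_params.json`):

```python
# Sketch: fetch the checkpoint weights and verify the LFS pointer's oid and size.
import hashlib
import os

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="tangfei/autotrain-fo40u-s5iv3",
    filename="checkpoint-5/model.safetensors",
)
assert os.path.getsize(path) == 1213277672  # size field from the pointer

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == "fd0a29f9734bcd496263af309d84d89c0c5664d27f20a25881e215ac2a1746c8"
```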
checkpoint-5/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71601208d3dbe07c2f1aea3cae0ed4c98bf9914a4beeba3095d0c5764ab4510a
+size 2426779306
checkpoint-5/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39f7b53fa5ece4e04085693f35df30b668805d836c0fe10d692de6efafa98832
+size 13990
checkpoint-5/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e6cc9e2bbfd55b279092f1e7dca7d1edf31d36ea1e594195628b26a9264ad2
+size 1064
checkpoint-5/trainer_state.json
ADDED
@@ -0,0 +1,69 @@
+{
+  "best_metric": NaN,
+  "best_model_checkpoint": "/tmp/model/checkpoint-5",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 5,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "learning_rate": 2.5e-05,
+      "loss": 0.0,
+      "step": 1
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 5e-05,
+      "loss": 0.0,
+      "step": 2
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 4.615384615384616e-05,
+      "loss": 0.0,
+      "step": 3
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 4.230769230769231e-05,
+      "loss": 0.0,
+      "step": 4
+    },
+    {
+      "epoch": 1.0,
+      "learning_rate": 3.846153846153846e-05,
+      "loss": 0.0,
+      "step": 5
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.1111111111111111,
+      "eval_f1_macro": 0.03333333333333333,
+      "eval_f1_micro": 0.1111111111111111,
+      "eval_f1_weighted": 0.02222222222222222,
+      "eval_loss": NaN,
+      "eval_precision_macro": 0.018518518518518517,
+      "eval_precision_micro": 0.1111111111111111,
+      "eval_precision_weighted": 0.012345679012345678,
+      "eval_recall_macro": 0.16666666666666666,
+      "eval_recall_micro": 0.1111111111111111,
+      "eval_recall_weighted": 0.1111111111111111,
+      "eval_runtime": 9.0185,
+      "eval_samples_per_second": 0.998,
+      "eval_steps_per_second": 0.111,
+      "step": 5
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 15,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 9861763782721536.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
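
Two things stand out in this state file: the logged training loss is exactly 0.0 at every step while `eval_loss` is NaN, which usually points at numerical divergence rather than a healthy run; and the `learning_rate` trace matches a linear schedule with warmup, where `warmup_ratio` 0.1 of `max_steps` 15 gives ceil(1.5) = 2 warmup steps, a 5e-05 peak, then linear decay over the remaining 13 steps. A sketch reproducing the logged values (the ceil-based warmup count mirrors the usual `transformers` convention, which is an assumption worth checking against your version):

```python
# Sketch: reproduce the learning_rate values in log_history.
import math

max_steps, warmup_ratio, peak_lr = 15, 0.1, 5e-05
warmup = math.ceil(max_steps * warmup_ratio)  # 2 warmup steps

def lr(step: int) -> float:
    if step < warmup:
        return peak_lr * step / max(1, warmup)                   # linear warmup
    return peak_lr * (max_steps - step) / (max_steps - warmup)   # linear decay

print([lr(s) for s in range(1, 6)])
# ~[2.5e-05, 5e-05, 4.62e-05, 4.23e-05, 3.85e-05], matching log_history above
```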
checkpoint-5/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35649f87edcfeccdac96eeefaf034c55a9d716ea30e82babb00887765df3f942
+size 4728
config.json
ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "google/vit-large-patch16-224",
+  "_num_labels": 6,
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "a",
+    "1": "b",
+    "2": "c",
+    "3": "d",
+    "4": "e",
+    "5": "f"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "a": 0,
+    "b": 1,
+    "c": 2,
+    "d": 3,
+    "e": 4,
+    "f": 5
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.1"
+}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd0a29f9734bcd496263af309d84d89c0c5664d27f20a25881e215ac2a1746c8
+size 1213277672
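
The pointer's size field doubles as a sanity check on the architecture: at `"torch_dtype": "float32"` (4 bytes per parameter), 1213277672 bytes works out to roughly 303M parameters, in line with ViT-Large's ~304M. Pure arithmetic, no download needed:

```python
# Sketch: estimate the parameter count from the safetensors file size.
size_bytes = 1213277672         # from the LFS pointer above
approx_params = size_bytes / 4  # float32 = 4 bytes/param; ignores the small safetensors header
print(f"~{approx_params / 1e6:.0f}M parameters")  # ~303M, consistent with ViT-Large
```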
preprocessor_config.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
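
These settings spell out the standard ViT preprocessing: resize to 224×224, rescale by 1/255 (= 0.00392156862745098), then normalize with mean and std 0.5 per channel, i.e. map pixel values from [0, 255] to [-1, 1]. A sketch checking the arithmetic against the real processor (the random image is a stand-in; `do_resize=False` sidesteps resizing an already-224×224 input):

```python
# Sketch: the processor's rescale + normalize equals (x/255 - 0.5) / 0.5.
import numpy as np
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("tangfei/autotrain-fo40u-s5iv3")
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)

pixel_values = processor(images=image, do_resize=False, return_tensors="np")["pixel_values"]
manual = (image.astype(np.float32) / 255.0 - 0.5) / 0.5  # maps [0, 255] -> [-1, 1]
np.testing.assert_allclose(pixel_values[0], manual.transpose(2, 0, 1), atol=1e-6)
```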
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35649f87edcfeccdac96eeefaf034c55a9d716ea30e82babb00887765df3f942
+size 4728
training_params.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "data_path": "tangfei/autotrain-data-autotrain-fo40u-s5iv3",
+  "model": "google/vit-large-patch16-224",
+  "username": "tangfei",
+  "lr": 5e-05,
+  "epochs": 3,
+  "batch_size": 8,
+  "warmup_ratio": 0.1,
+  "gradient_accumulation": 1,
+  "optimizer": "adamw_torch",
+  "scheduler": "linear",
+  "weight_decay": 0.0,
+  "max_grad_norm": 1.0,
+  "seed": 42,
+  "train_split": "train",
+  "valid_split": "validation",
+  "logging_steps": -1,
+  "project_name": "/tmp/model",
+  "auto_find_batch_size": false,
+  "mixed_precision": "fp32",
+  "save_total_limit": 1,
+  "save_strategy": "epoch",
+  "push_to_hub": true,
+  "repo_id": "tangfei/autotrain-fo40u-s5iv3",
+  "evaluation_strategy": "epoch",
+  "image_column": "autotrain_image",
+  "target_column": "autotrain_label",
+  "log": "none"
+}
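
These are AutoTrain's parameter names rather than `transformers` ones; under the hood they get mapped onto a `TrainingArguments` object. A rough sketch of the obvious correspondence (the mapping is an assumption, not taken from AutoTrain's source):

```python
# Rough sketch: plausible mapping of training_params.json onto TrainingArguments.
import json

from transformers import TrainingArguments

with open("training_params.json") as f:
    p = json.load(f)

args = TrainingArguments(
    output_dir=p["project_name"],
    learning_rate=p["lr"],
    num_train_epochs=p["epochs"],
    per_device_train_batch_size=p["batch_size"],
    warmup_ratio=p["warmup_ratio"],
    gradient_accumulation_steps=p["gradient_accumulation"],
    optim=p["optimizer"],
    lr_scheduler_type=p["scheduler"],
    weight_decay=p["weight_decay"],
    max_grad_norm=p["max_grad_norm"],
    seed=p["seed"],
    save_total_limit=p["save_total_limit"],
    save_strategy=p["save_strategy"],
    evaluation_strategy=p["evaluation_strategy"],
)
```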