hossay commited on
Commit
aa26950
·
1 Parent(s): 948eb34

🍻 cheers

Browse files
README.md ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: google/vit-base-patch16-224-in21k
4
+ tags:
5
+ - image-classification
6
+ - generated_from_trainer
7
+ datasets:
8
+ - generator
9
+ metrics:
10
+ - accuracy
11
+ - f1
12
+ model-index:
13
+ - name: stool-condition-classification
14
+ results:
15
+ - task:
16
+ name: Image Classification
17
+ type: image-classification
18
+ dataset:
19
+ name: stool-image
20
+ type: generator
21
+ config: default
22
+ split: train
23
+ args: default
24
+ metrics:
25
+ - name: Accuracy
26
+ type: accuracy
27
+ value: 0.8580527752502275
28
+ - name: F1
29
+ type: f1
30
+ value: 0.8173302107728336
31
+ ---
32
+
33
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
34
+ should probably proofread and complete it, then remove this comment. -->
35
+
36
+ # stool-condition-classification
37
+
38
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the stool-image dataset.
39
+ It achieves the following results on the evaluation set:
40
+ - Loss: 0.3669
41
+ - Auroc: 0.9121
42
+ - Accuracy: 0.8581
43
+ - Sensitivity: 0.7756
44
+ - Specificity: 0.9153
45
+ - F1: 0.8173
46
+
47
+ ## Model description
48
+
49
+ More information needed
50
+
51
+ ## Intended uses & limitations
52
+
53
+ More information needed
54
+
55
+ ## Training and evaluation data
56
+
57
+ More information needed
58
+
59
+ ## Training procedure
60
+
61
+ ### Training hyperparameters
62
+
63
+ The following hyperparameters were used during training:
64
+ - learning_rate: 0.0002
65
+ - train_batch_size: 16
66
+ - eval_batch_size: 8
67
+ - seed: 42
68
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
69
+ - lr_scheduler_type: linear
70
+ - num_epochs: 1
71
+ - mixed_precision_training: Native AMP
72
+
73
+ ### Training results
74
+
75
+ | Training Loss | Epoch | Step | Validation Loss | Auroc | Accuracy | Sensitivity | Specificity | F1 |
76
+ |:-------------:|:-----:|:----:|:---------------:|:------:|:--------:|:-----------:|:----------:|:------:|
77
+ | 0.4071 | 0.98 | 100 | 0.4415 | 0.8876 | 0.8179 | 0.6629 | 0.9552 | 0.7738 |
78
+
79
+
80
+ ### Framework versions
81
+
82
+ - Transformers 4.36.1
83
+ - Pytorch 2.0.1
84
+ - Datasets 2.15.0
85
+ - Tokenizers 0.15.0
all_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "eval_accuracy": 0.8580527752502275,
4
+ "eval_auroc": 0.912140044512926,
5
+ "eval_f1": 0.8173302107728336,
6
+ "eval_loss": 0.366916298866272,
7
+ "eval_runtime": 68.1665,
8
+ "eval_samples_per_second": 16.122,
9
+ "eval_sensitivity": 0.7755555555555556,
10
+ "eval_specificty": 0.9152542372881356,
11
+ "eval_steps_per_second": 2.024,
12
+ "train_loss": 0.4936492688515607,
13
+ "train_runtime": 82.0698,
14
+ "train_samples_per_second": 19.8,
15
+ "train_steps_per_second": 1.243
16
+ }
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "./vit-base-UCEIS-strong-aug/checkpoint-2000",
3
  "architectures": [
4
  "ViTForImageClassification"
5
  ],
@@ -9,8 +9,8 @@
9
  "hidden_dropout_prob": 0.0,
10
  "hidden_size": 768,
11
  "id2label": {
12
- "0": "Normal",
13
- "1": "Abnormal"
14
  },
15
  "image_size": 224,
16
  "initializer_range": 0.02,
 
1
  {
2
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
3
  "architectures": [
4
  "ViTForImageClassification"
5
  ],
 
9
  "hidden_dropout_prob": 0.0,
10
  "hidden_size": 768,
11
  "id2label": {
12
+ "0": "0",
13
+ "1": "1"
14
  },
15
  "image_size": 224,
16
  "initializer_range": 0.02,
eval_results.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "eval_accuracy": 0.8580527752502275,
4
+ "eval_auroc": 0.912140044512926,
5
+ "eval_f1": 0.8173302107728336,
6
+ "eval_loss": 0.366916298866272,
7
+ "eval_runtime": 68.1665,
8
+ "eval_samples_per_second": 16.122,
9
+ "eval_sensitivity": 0.7755555555555556,
10
+ "eval_specificty": 0.9152542372881356,
11
+ "eval_steps_per_second": 2.024
12
+ }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7eff3b0a5fe7e6eb0b633bd5ee7de7b8970ed276a00429d75c84e7e149f0b9cc
3
  size 343223968
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67e325baa94230e86d326557616c9574e14bc969ba434562d3a52bc97b6bbe6a
3
  size 343223968
runs/Jan05_17-12-10_DESKTOP-BDBS5RV/events.out.tfevents.1704442331.DESKTOP-BDBS5RV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2c397fcf5e38bf12ed1ead4460427d233c83b4f80fc3400763b76d9140ff17a
3
+ size 6732
runs/Jan05_17-12-10_DESKTOP-BDBS5RV/events.out.tfevents.1704442481.DESKTOP-BDBS5RV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a81b132f789c2ad0ed1bbcf06af19bfedcb7cab2578529e47b56f2e50bd6d3ad
3
+ size 338
train_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "train_loss": 0.4936492688515607,
4
+ "train_runtime": 82.0698,
5
+ "train_samples_per_second": 19.8,
6
+ "train_steps_per_second": 1.243
7
+ }
trainer_state.json ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.8875705740958131,
3
+ "best_model_checkpoint": "./stool-condition-classification\\checkpoint-100",
4
+ "epoch": 1.0,
5
+ "eval_steps": 100,
6
+ "global_step": 102,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.1,
13
+ "learning_rate": 0.0001803921568627451,
14
+ "loss": 0.5897,
15
+ "step": 10
16
+ },
17
+ {
18
+ "epoch": 0.2,
19
+ "learning_rate": 0.00016078431372549022,
20
+ "loss": 0.479,
21
+ "step": 20
22
+ },
23
+ {
24
+ "epoch": 0.29,
25
+ "learning_rate": 0.0001411764705882353,
26
+ "loss": 0.5314,
27
+ "step": 30
28
+ },
29
+ {
30
+ "epoch": 0.39,
31
+ "learning_rate": 0.00012156862745098039,
32
+ "loss": 0.5547,
33
+ "step": 40
34
+ },
35
+ {
36
+ "epoch": 0.49,
37
+ "learning_rate": 0.00010196078431372549,
38
+ "loss": 0.4872,
39
+ "step": 50
40
+ },
41
+ {
42
+ "epoch": 0.59,
43
+ "learning_rate": 8.23529411764706e-05,
44
+ "loss": 0.505,
45
+ "step": 60
46
+ },
47
+ {
48
+ "epoch": 0.69,
49
+ "learning_rate": 6.274509803921569e-05,
50
+ "loss": 0.5257,
51
+ "step": 70
52
+ },
53
+ {
54
+ "epoch": 0.78,
55
+ "learning_rate": 4.313725490196079e-05,
56
+ "loss": 0.4404,
57
+ "step": 80
58
+ },
59
+ {
60
+ "epoch": 0.88,
61
+ "learning_rate": 2.3529411764705884e-05,
62
+ "loss": 0.4018,
63
+ "step": 90
64
+ },
65
+ {
66
+ "epoch": 0.98,
67
+ "learning_rate": 3.92156862745098e-06,
68
+ "loss": 0.4071,
69
+ "step": 100
70
+ },
71
+ {
72
+ "epoch": 0.98,
73
+ "eval_accuracy": 0.8179419525065963,
74
+ "eval_auroc": 0.8875705740958131,
75
+ "eval_f1": 0.7737704918032786,
76
+ "eval_loss": 0.44151929020881653,
77
+ "eval_runtime": 19.7299,
78
+ "eval_samples_per_second": 19.209,
79
+ "eval_sensitivity": 0.6629213483146067,
80
+ "eval_specificty": 0.9552238805970149,
81
+ "eval_steps_per_second": 2.433,
82
+ "step": 100
83
+ },
84
+ {
85
+ "epoch": 1.0,
86
+ "step": 102,
87
+ "total_flos": 1.25924483123712e+17,
88
+ "train_loss": 0.4936492688515607,
89
+ "train_runtime": 82.0698,
90
+ "train_samples_per_second": 19.8,
91
+ "train_steps_per_second": 1.243
92
+ }
93
+ ],
94
+ "logging_steps": 10,
95
+ "max_steps": 102,
96
+ "num_input_tokens_seen": 0,
97
+ "num_train_epochs": 1,
98
+ "save_steps": 100,
99
+ "total_flos": 1.25924483123712e+17,
100
+ "train_batch_size": 16,
101
+ "trial_name": null,
102
+ "trial_params": null
103
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd4157da5d8d9f13bebf7070514c0e1f61d4b4e845588f68818e3014effdf48d
3
+ size 4283