akseljoonas committed
Commit d846a64
1 Parent(s): c8a35c4

End of training

README.md ADDED
@@ -0,0 +1,114 @@
---
license: cc-by-nc-sa-4.0
base_model: microsoft/layoutlmv3-base
tags:
- generated_from_trainer
datasets:
- format_dataset
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: reciept-model-2500
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: format_dataset
      type: format_dataset
      config: assesment dataset
      split: test
      args: assesment dataset
    metrics:
    - name: Precision
      type: precision
      value: 0.9673366834170855
    - name: Recall
      type: recall
      value: 0.9625
    - name: F1
      type: f1
      value: 0.9649122807017544
    - name: Accuracy
      type: accuracy
      value: 0.9993105033325672
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# reciept-model-2500

This model is a fine-tuned version of [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) on the format_dataset dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0043
- Precision: 0.9673
- Recall: 0.9625
- F1: 0.9649
- Accuracy: 0.9993

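As a quick usage reference, the snippet below is a minimal inference sketch, not the authors' script. It assumes the model is published under the hypothetical Hub id `akseljoonas/reciept-model-2500`, and that OCR words and boxes (normalized to 0-1000) are supplied by the caller, since the bundled processor has `apply_ocr` disabled.

```python
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForTokenClassification

repo_id = "akseljoonas/reciept-model-2500"  # assumed repository id

processor = AutoProcessor.from_pretrained(repo_id)  # apply_ocr is false in the saved preprocessor config
model = AutoModelForTokenClassification.from_pretrained(repo_id)

image = Image.open("receipt.png").convert("RGB")
words = ["TOTAL", "12.99"]                            # placeholder OCR words
boxes = [[60, 800, 180, 830], [500, 800, 610, 830]]   # boxes normalized to 0-1000

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
with torch.no_grad():
    logits = model(**encoding).logits

predicted_ids = logits.argmax(-1).squeeze().tolist()
tokens = processor.tokenizer.convert_ids_to_tokens(encoding["input_ids"][0].tolist())
print([(tok, model.config.id2label[i]) for tok, i in zip(tokens, predicted_ids)])
```

Tokens tagged `B-total_amount` / `I-total_amount` would then be merged to recover the receipt total; everything else is labeled `Ignore`.
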
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a hedged `TrainingArguments` sketch follows the list):
- learning_rate: 1e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 2500

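These settings map roughly onto the `TrainingArguments` below. This is a hedged reconstruction for orientation only: the 100-step evaluation cadence is inferred from the results table that follows, and the original training script is not part of this commit.

```python
from transformers import TrainingArguments

# Approximate equivalent of the listed hyperparameters (not the original script).
training_args = TrainingArguments(
    output_dir="reciept-model-2500",
    learning_rate=1e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    max_steps=2500,                 # training_steps: 2500
    lr_scheduler_type="linear",
    # Adam betas=(0.9, 0.999) and epsilon=1e-08 are the optimizer defaults.
    evaluation_strategy="steps",    # assumed from the 100-step metric cadence below
    eval_steps=100,
    logging_steps=100,
)
```
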
### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 0.62 | 100 | 0.0150 | 0.8575 | 0.8725 | 0.8649 | 0.9972 |
| No log | 1.25 | 200 | 0.0075 | 0.8756 | 0.9325 | 0.9031 | 0.9979 |
| No log | 1.88 | 300 | 0.0154 | 0.8744 | 0.8875 | 0.8809 | 0.9973 |
| No log | 2.5 | 400 | 0.0118 | 0.8881 | 0.9525 | 0.9192 | 0.9982 |
| 0.0029 | 3.12 | 500 | 0.0091 | 0.9158 | 0.925 | 0.9204 | 0.9983 |
| 0.0029 | 3.75 | 600 | 0.0167 | 0.8720 | 0.9025 | 0.8870 | 0.9975 |
| 0.0029 | 4.38 | 700 | 0.0092 | 0.9183 | 0.9275 | 0.9229 | 0.9983 |
| 0.0029 | 5.0 | 800 | 0.0113 | 0.8843 | 0.9175 | 0.9006 | 0.9979 |
| 0.0029 | 5.62 | 900 | 0.0106 | 0.9349 | 0.8975 | 0.9158 | 0.9982 |
| 0.0017 | 6.25 | 1000 | 0.0043 | 0.9673 | 0.9625 | 0.9649 | 0.9993 |
| 0.0017 | 6.88 | 1100 | 0.0044 | 0.9602 | 0.965 | 0.9626 | 0.9993 |
| 0.0017 | 7.5 | 1200 | 0.0118 | 0.9246 | 0.92 | 0.9223 | 0.9982 |
| 0.0017 | 8.12 | 1300 | 0.0067 | 0.9406 | 0.95 | 0.9453 | 0.9988 |
| 0.0017 | 8.75 | 1400 | 0.0083 | 0.9409 | 0.955 | 0.9479 | 0.9989 |
| 0.001 | 9.38 | 1500 | 0.0060 | 0.9495 | 0.94 | 0.9447 | 0.9988 |
| 0.001 | 10.0 | 1600 | 0.0078 | 0.9369 | 0.9275 | 0.9322 | 0.9985 |
| 0.001 | 10.62 | 1700 | 0.0093 | 0.9248 | 0.9525 | 0.9384 | 0.9986 |
| 0.001 | 11.25 | 1800 | 0.0097 | 0.9062 | 0.9425 | 0.9240 | 0.9983 |
| 0.001 | 11.88 | 1900 | 0.0100 | 0.9098 | 0.9325 | 0.9210 | 0.9982 |
| 0.0006 | 12.5 | 2000 | 0.0111 | 0.9113 | 0.925 | 0.9181 | 0.9981 |
| 0.0006 | 13.12 | 2100 | 0.0107 | 0.9275 | 0.9275 | 0.9275 | 0.9983 |
| 0.0006 | 13.75 | 2200 | 0.0105 | 0.9279 | 0.9325 | 0.9302 | 0.9984 |
| 0.0006 | 14.38 | 2300 | 0.0109 | 0.9325 | 0.9325 | 0.9325 | 0.9985 |
| 0.0006 | 15.0 | 2400 | 0.0109 | 0.9325 | 0.9325 | 0.9325 | 0.9985 |
| 0.0003 | 15.62 | 2500 | 0.0109 | 0.9325 | 0.9325 | 0.9325 | 0.9985 |

### Framework versions

- Transformers 4.38.0.dev0
- Pytorch 2.1.0+cu121
- Datasets 2.16.1
- Tokenizers 0.15.1
config.json ADDED
@@ -0,0 +1,50 @@
{
  "_name_or_path": "microsoft/layoutlmv3-base",
  "architectures": [
    "LayoutLMv3ForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "coordinate_size": 128,
  "eos_token_id": 2,
  "has_relative_attention_bias": true,
  "has_spatial_attention_bias": true,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "Ignore",
    "1": "B-total_amount",
    "2": "I-total_amount"
  },
  "initializer_range": 0.02,
  "input_size": 224,
  "intermediate_size": 3072,
  "label2id": {
    "B-total_amount": 1,
    "I-total_amount": 2,
    "Ignore": 0
  },
  "layer_norm_eps": 1e-05,
  "max_2d_position_embeddings": 1024,
  "max_position_embeddings": 514,
  "max_rel_2d_pos": 256,
  "max_rel_pos": 128,
  "model_type": "layoutlmv3",
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "patch_size": 16,
  "rel_2d_pos_bins": 64,
  "rel_pos_bins": 32,
  "second_input_size": 112,
  "shape_size": 128,
  "text_embed": true,
  "torch_dtype": "float32",
  "transformers_version": "4.38.0.dev0",
  "type_vocab_size": 1,
  "visual_embed": true,
  "vocab_size": 50265
}
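The `id2label` / `label2id` entries above define the three-class BIO scheme used by the classification head (`Ignore` plus `B-`/`I-total_amount`). As a rough illustration of how such a head is attached to the base checkpoint before fine-tuning, here is a sketch; it is not the original training code.

```python
from transformers import AutoConfig, AutoModelForTokenClassification

# Rebuild a token-classification head over layoutlmv3-base with the same label
# scheme as this config; a sketch of the setup step, not the authors' script.
id2label = {0: "Ignore", 1: "B-total_amount", 2: "I-total_amount"}
label2id = {label: idx for idx, label in id2label.items()}

config = AutoConfig.from_pretrained(
    "microsoft/layoutlmv3-base",
    num_labels=len(id2label),
    id2label=id2label,
    label2id=label2id,
)
model = AutoModelForTokenClassification.from_pretrained(
    "microsoft/layoutlmv3-base", config=config
)
```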
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:204d9049f51ebfaba83eef77c268d78f9630657e40dbcdee4a0b2c8010f199b1
size 501343228
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
{
  "apply_ocr": false,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_processor_type": "LayoutLMv3ImageProcessor",
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "ocr_lang": null,
  "processor_class": "LayoutLMv3Processor",
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 224,
    "width": 224
  },
  "tesseract_config": ""
}
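Taken together, these values mean each page image is resized to 224x224 with bilinear resampling (`resample: 2`), rescaled by 1/255, and normalized to roughly [-1, 1] with mean = std = 0.5, while OCR is left to the caller (`apply_ocr: false`). A hand-written sketch of that image path, for illustration only:

```python
import numpy as np
from PIL import Image

# Manual equivalent of the LayoutLMv3ImageProcessor settings above (a sketch;
# in practice the processor / image processor does this for you).
image = Image.open("receipt.png").convert("RGB")
image = image.resize((224, 224), Image.BILINEAR)          # do_resize, resample=2 (bilinear)
pixels = np.asarray(image).astype(np.float32) / 255.0     # do_rescale, rescale_factor = 1/255
pixels = (pixels - 0.5) / 0.5                             # do_normalize, image_mean = image_std = 0.5
pixel_values = pixels.transpose(2, 0, 1)                  # channels-first: (3, 224, 224)
```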
runs/Feb05_15-24-58_b6fc4add67b8/events.out.tfevents.1707146704.b6fc4add67b8.155.4 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a24fdf0c33d2960dfcd1850545cbd97ff0e1feb857d6ea5827a26cc00b07dcb
size 17899
runs/Feb05_15-24-58_b6fc4add67b8/events.out.tfevents.1707148705.b6fc4add67b8.155.5 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e4fc8d3d67dd85c1cd876f8c6f4237ca60dd036353bab7e50b8a223bfbda1454
size 560
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,79 @@
{
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50264": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "apply_ocr": false,
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "cls_token_box": [
    0,
    0,
    0,
    0
  ],
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 512,
  "only_label_first_subword": true,
  "pad_token": "<pad>",
  "pad_token_box": [
    0,
    0,
    0,
    0
  ],
  "pad_token_label": -100,
  "processor_class": "LayoutLMv3Processor",
  "sep_token": "</s>",
  "sep_token_box": [
    0,
    0,
    0,
    0
  ],
  "tokenizer_class": "LayoutLMv3Tokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0e95e9756a122371c0b59c24ba36af03ec91294133c771320b1594e798c5e1b
size 4728
vocab.json ADDED
The diff for this file is too large to render. See raw diff