End of training
Files changed:
- README.md +137 -0
- added_tokens.json +3 -0
- bpe.codes +0 -0
- config.json +37 -0
- model.safetensors +3 -0
- special_tokens_map.json +9 -0
- tokenizer_config.json +54 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
README.md
ADDED
@@ -0,0 +1,137 @@
---
base_model: vinai/phobert-base-v2
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: PhoBert_Lexical_lc
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# PhoBert_Lexical_lc

This model is a fine-tuned version of [vinai/phobert-base-v2](https://huggingface.co/vinai/phobert-base-v2) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6002
- Accuracy: 0.8324
- F1: 0.8697

## Model description

More information needed

## Intended uses & limitations

More information needed
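Since the card is auto-generated, no usage snippet is provided. The following is a minimal sketch of binary classification with this checkpoint; the repo id is a placeholder, and the custom `PhoBertLexical` architecture declared in config.json may require the original modeling code rather than `AutoModelForSequenceClassification`:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Placeholder repo id (assumption); the custom "PhoBertLexical"
# architecture in config.json may need the original modeling code.
repo = "your-username/PhoBert_Lexical_lc"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

# PhoBERT expects word-segmented Vietnamese input (compound words joined
# with underscores), e.g. produced by VnCoreNLP's RDRSegmenter.
text = "Thông_tin này chưa được kiểm_chứng ."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=256)
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(dim=-1).item()])
```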

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 15
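These settings map onto `transformers.TrainingArguments` roughly as follows. This is a sketch only: `output_dir` and the 200-step evaluation cadence (inferred from the results table below) are assumptions, not recorded values.

```python
from transformers import TrainingArguments

# Sketch of TrainingArguments matching the hyperparameters above;
# output_dir and the 200-step eval cadence are assumptions.
training_args = TrainingArguments(
    output_dir="PhoBert_Lexical_lc",
    learning_rate=2e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=15,
    eval_strategy="steps",
    eval_steps=200,
)
```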

### Training results

| Training Loss | Epoch   | Step  | Validation Loss | Accuracy | F1     |
|:-------------:|:-------:|:-----:|:---------------:|:--------:|:------:|
| No log        | 0.1927  | 200   | 0.7068          | 0.6753   | 0.7632 |
| No log        | 0.3854  | 400   | 0.7343          | 0.6772   | 0.7646 |
| No log        | 0.5780  | 600   | 0.5764          | 0.7641   | 0.8248 |
| No log        | 0.7707  | 800   | 0.7871          | 0.6153   | 0.7178 |
| No log        | 0.9634  | 1000  | 0.5685          | 0.7548   | 0.8186 |
| 0.358         | 1.1561  | 1200  | 0.6231          | 0.7569   | 0.8203 |
| 0.358         | 1.3487  | 1400  | 0.5796          | 0.7737   | 0.8314 |
| 0.358         | 1.5414  | 1600  | 0.5651          | 0.7758   | 0.8327 |
| 0.358         | 1.7341  | 1800  | 0.6171          | 0.7502   | 0.8157 |
| 0.358         | 1.9268  | 2000  | 0.5711          | 0.7645   | 0.8254 |
| 0.2472        | 2.1195  | 2200  | 0.6046          | 0.7615   | 0.8235 |
| 0.2472        | 2.3121  | 2400  | 0.8503          | 0.6871   | 0.7718 |
| 0.2472        | 2.5048  | 2600  | 0.7907          | 0.7136   | 0.7908 |
| 0.2472        | 2.6975  | 2800  | 0.6425          | 0.7575   | 0.8209 |
| 0.2472        | 2.8902  | 3000  | 0.5584          | 0.8067   | 0.8530 |
| 0.2065        | 3.0829  | 3200  | 0.6602          | 0.7627   | 0.8244 |
| 0.2065        | 3.2755  | 3400  | 0.7031          | 0.7570   | 0.8206 |
| 0.2065        | 3.4682  | 3600  | 0.6166          | 0.7832   | 0.8382 |
| 0.2065        | 3.6609  | 3800  | 0.7400          | 0.7279   | 0.8008 |
| 0.2065        | 3.8536  | 4000  | 0.5337          | 0.8066   | 0.8531 |
| 0.1757        | 4.0462  | 4200  | 0.7663          | 0.7600   | 0.8227 |
| 0.1757        | 4.2389  | 4400  | 0.6286          | 0.7849   | 0.8392 |
| 0.1757        | 4.4316  | 4600  | 0.6379          | 0.8031   | 0.8511 |
| 0.1757        | 4.6243  | 4800  | 0.6865          | 0.7751   | 0.8328 |
| 0.1757        | 4.8170  | 5000  | 0.5512          | 0.8216   | 0.8629 |
| 0.1511        | 5.0096  | 5200  | 0.6118          | 0.8058   | 0.8529 |
| 0.1511        | 5.2023  | 5400  | 0.8038          | 0.7545   | 0.8191 |
| 0.1511        | 5.3950  | 5600  | 0.6799          | 0.8170   | 0.8600 |
| 0.1511        | 5.5877  | 5800  | 0.8013          | 0.7679   | 0.8282 |
| 0.1511        | 5.7803  | 6000  | 0.7806          | 0.7809   | 0.8365 |
| 0.1511        | 5.9730  | 6200  | 0.7302          | 0.7738   | 0.8320 |
| 0.129         | 6.1657  | 6400  | 0.6002          | 0.8324   | 0.8697 |
| 0.129         | 6.3584  | 6600  | 0.7237          | 0.8069   | 0.8534 |
| 0.129         | 6.5511  | 6800  | 0.7118          | 0.8072   | 0.8536 |
| 0.129         | 6.7437  | 7000  | 0.7674          | 0.7933   | 0.8447 |
| 0.129         | 6.9364  | 7200  | 0.7735          | 0.7737   | 0.8319 |
| 0.1133        | 7.1291  | 7400  | 0.6940          | 0.8152   | 0.8588 |
| 0.1133        | 7.3218  | 7600  | 0.8333          | 0.7880   | 0.8413 |
| 0.1133        | 7.5145  | 7800  | 0.7050          | 0.8016   | 0.8502 |
| 0.1133        | 7.7071  | 8000  | 0.8503          | 0.7763   | 0.8336 |
| 0.1133        | 7.8998  | 8200  | 0.8677          | 0.7734   | 0.8318 |
| 0.0964        | 8.0925  | 8400  | 0.7368          | 0.7994   | 0.8488 |
| 0.0964        | 8.2852  | 8600  | 0.7291          | 0.8161   | 0.8594 |
| 0.0964        | 8.4778  | 8800  | 0.8928          | 0.7948   | 0.8457 |
| 0.0964        | 8.6705  | 9000  | 0.9070          | 0.7799   | 0.8360 |
| 0.0964        | 8.8632  | 9200  | 0.8584          | 0.7961   | 0.8465 |
| 0.085         | 9.0559  | 9400  | 0.8249          | 0.8081   | 0.8543 |
| 0.085         | 9.2486  | 9600  | 0.8202          | 0.7929   | 0.8446 |
| 0.085         | 9.4412  | 9800  | 0.9296          | 0.7757   | 0.8332 |
| 0.085         | 9.6339  | 10000 | 0.9153          | 0.7931   | 0.8447 |
| 0.085         | 9.8266  | 10200 | 0.9087          | 0.7868   | 0.8405 |
| 0.0749        | 10.0193 | 10400 | 0.8043          | 0.8054   | 0.8526 |
| 0.0749        | 10.2119 | 10600 | 0.9692          | 0.7916   | 0.8436 |
| 0.0749        | 10.4046 | 10800 | 0.8181          | 0.8190   | 0.8614 |
| 0.0749        | 10.5973 | 11000 | 0.8767          | 0.8010   | 0.8498 |
| 0.0749        | 10.7900 | 11200 | 0.9470          | 0.7944   | 0.8455 |
| 0.0749        | 10.9827 | 11400 | 0.9699          | 0.7796   | 0.8358 |
| 0.0668        | 11.1753 | 11600 | 0.9448          | 0.7862   | 0.8402 |
| 0.0668        | 11.3680 | 11800 | 0.9925          | 0.7982   | 0.8480 |
| 0.0668        | 11.5607 | 12000 | 1.0677          | 0.7826   | 0.8378 |
| 0.0668        | 11.7534 | 12200 | 0.8985          | 0.7994   | 0.8487 |
| 0.0668        | 11.9461 | 12400 | 0.9710          | 0.7969   | 0.8471 |
| 0.0601        | 12.1387 | 12600 | 1.0032          | 0.7924   | 0.8442 |
| 0.0601        | 12.3314 | 12800 | 1.0084          | 0.7911   | 0.8432 |
| 0.0601        | 12.5241 | 13000 | 1.1361          | 0.7666   | 0.8272 |
| 0.0601        | 12.7168 | 13200 | 0.9933          | 0.7935   | 0.8449 |
| 0.0601        | 12.9094 | 13400 | 1.0405          | 0.7888   | 0.8419 |
| 0.0528        | 13.1021 | 13600 | 1.0769          | 0.7822   | 0.8375 |
| 0.0528        | 13.2948 | 13800 | 1.0596          | 0.7906   | 0.8431 |
| 0.0528        | 13.4875 | 14000 | 1.0612          | 0.7848   | 0.8393 |
| 0.0528        | 13.6802 | 14200 | 1.0330          | 0.7909   | 0.8434 |
| 0.0528        | 13.8728 | 14400 | 1.0386          | 0.7967   | 0.8471 |
| 0.0477        | 14.0655 | 14600 | 0.9948          | 0.7956   | 0.8464 |
| 0.0477        | 14.2582 | 14800 | 1.0767          | 0.7897   | 0.8425 |
| 0.0477        | 14.4509 | 15000 | 1.0176          | 0.7938   | 0.8451 |
| 0.0477        | 14.6435 | 15200 | 1.0246          | 0.7945   | 0.8456 |
| 0.0477        | 14.8362 | 15400 | 1.0230          | 0.7969   | 0.8472 |

### Framework versions

- Transformers 4.44.0
- Pytorch 2.1.2
- Datasets 2.20.0
- Tokenizers 0.19.1
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
{
  "<mask>": 64000
}
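The single added token, `<mask>` at id 64000, is what brings the vocabulary to the 64001 entries reported in config.json below. A quick check, as a sketch (assuming the tokenizer loads from the base vinai/phobert-base-v2 repo, which ships the same BPE files):

```python
from transformers import AutoTokenizer

# <mask> is appended at id 64000, so vocab_size = 64000 BPE entries + 1.
tok = AutoTokenizer.from_pretrained("vinai/phobert-base-v2")
print(tok.mask_token, tok.mask_token_id)  # expected: <mask> 64000
```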
bpe.codes
ADDED
The diff for this file is too large to render.
config.json
ADDED
@@ -0,0 +1,37 @@
{
  "_name_or_path": "vinai/phobert-base-v2",
  "architectures": [
    "PhoBertLexical"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "B\u00ecnh th\u01b0\u1eddng",
    "1": "T\u00ednh nhi\u1ec7m th\u1ea5p"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "B\u00ecnh th\u01b0\u1eddng": 0,
    "T\u00ednh nhi\u1ec7m th\u1ea5p": 1
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 258,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_classes": 2,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "tokenizer_class": "PhobertTokenizer",
  "torch_dtype": "float32",
  "transformers_version": "4.44.0",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 64001
}
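The escaped label strings are Vietnamese: id 0 decodes to "Bình thường" ("Normal") and id 1 to "Tính nhiệm thấp" ("Low credibility"). A quick way to inspect them, as a sketch (the repo id is a placeholder for this checkpoint's Hub path):

```python
from transformers import AutoConfig

# Placeholder repo id; substitute the actual Hub path of this checkpoint.
config = AutoConfig.from_pretrained("your-username/PhoBert_Lexical_lc")
print(config.id2label)
# {0: 'Bình thường', 1: 'Tính nhiệm thấp'}  ("Normal" / "Low credibility")
```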
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:92cfaf6bc775f4d8e608fa7a6af6012fc10d0ec5351afa86ee057e670e6beb50
size 538468008
special_tokens_map.json
ADDED
@@ -0,0 +1,9 @@
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}
tokenizer_config.json
ADDED
@@ -0,0 +1,54 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "64000": {
      "content": "<mask>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "PhobertTokenizer",
  "unk_token": "<unk>"
}
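Note that `model_max_length` is left at the library's "unset" sentinel value, while config.json caps `max_position_embeddings` at 258, so callers should truncate explicitly when encoding. A sketch (the 256-token cap and the pre-segmented example sentence are assumptions):

```python
from transformers import AutoTokenizer

# model_max_length is unset (the huge sentinel above), but the model only
# has 258 position embeddings, so cap inputs explicitly when encoding.
tok = AutoTokenizer.from_pretrained("vinai/phobert-base-v2")
enc = tok("Ví_dụ câu tiếng Việt đã được tách_từ .", truncation=True, max_length=256)
print(len(enc["input_ids"]))
```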
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63d05598c2ea80f70d2b1074ba6871ead6cd753125051f297d69b32271193f13
size 5176
vocab.txt
ADDED
The diff for this file is too large to render.