jenslemmens committed
Commit dcb555b
Parent: b0bd7e0

Training in progress, epoch 0

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ license: mit
+ base_model: xlm-roberta-base
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: dutch_genre_classifier
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # dutch_genre_classifier
+
+ This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.06
+ - num_epochs: 2.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.36.2
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
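The hyperparameter list in the model card maps almost one-to-one onto `transformers.TrainingArguments`. Below is a minimal sketch of that mapping; the `output_dir` is a hypothetical placeholder, and the Adam betas/epsilon shown in the card are the library defaults, so they need no explicit arguments.

```python
# Sketch: the model-card hyperparameters expressed as TrainingArguments.
# output_dir is a placeholder; fp16=True corresponds to "Native AMP" and
# needs a CUDA device at runtime.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="dutch_genre_classifier",  # hypothetical path
    learning_rate=3e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    gradient_accumulation_steps=2,  # effective train batch size: 8 * 2 = 16
    lr_scheduler_type="linear",
    warmup_ratio=0.06,
    num_train_epochs=2.0,
    fp16=True,
)
```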
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 2.0,
+   "eval_fscore": 0.9706675849357428,
+   "eval_loss": 0.14040668308734894,
+   "eval_precision": 0.971255206348869,
+   "eval_recall": 0.9707142857142858,
+   "eval_runtime": 9.1586,
+   "eval_samples": 1400,
+   "eval_samples_per_second": 152.862,
+   "eval_steps_per_second": 19.108,
+   "train_loss": 0.23042542536496113,
+   "train_runtime": 543.7059,
+   "train_samples": 11415,
+   "train_samples_per_second": 41.99,
+   "train_steps_per_second": 2.623
+ }
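The `eval_fscore`, `eval_precision`, and `eval_recall` keys are not emitted by the `Trainer` on its own, which points to a custom `compute_metrics` callback. A plausible reconstruction follows; the `average="weighted"` choice is an assumption, since the averaging mode is not recorded anywhere in this commit.

```python
# Plausible compute_metrics reconstruction. The Trainer prefixes the returned
# keys with "eval_", producing eval_precision / eval_recall / eval_fscore.
import numpy as np
from sklearn.metrics import precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, fscore, _ = precision_recall_fscore_support(
        labels, preds, average="weighted"  # assumption: averaging mode unknown
    )
    return {"precision": precision, "recall": recall, "fscore": fscore}
```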
config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_name_or_path": "xlm-roberta-base",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "essay": 0,
+     "facebook": 1,
+     "literature": 2,
+     "news": 3,
+     "parliament": 4,
+     "review": 5,
+     "tweet": 6
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
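Note the asymmetry in the config: `label2id` holds the seven genre names, but `id2label` still contains the placeholder names `LABEL_0` through `LABEL_6`, so readable predictions require inverting `label2id`. A minimal inference sketch, assuming the repository id `jenslemmens/dutch_genre_classifier` (inferred from the committer and model name, not stated in this commit):

```python
# Sketch: classify a Dutch text into one of the seven genres. id2label holds
# placeholders, so the genre names are recovered by inverting label2id.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "jenslemmens/dutch_genre_classifier"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

id2genre = {i: name for name, i in model.config.label2id.items()}

inputs = tokenizer("De Kamer stemde gisteren over het wetsvoorstel.",
                   return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
print(id2genre[logits.argmax(dim=-1).item()])
# one of: essay, facebook, literature, news, parliament, review, tweet
```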
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "epoch": 2.0,
+   "eval_fscore": 0.9706675849357428,
+   "eval_loss": 0.14040668308734894,
+   "eval_precision": 0.971255206348869,
+   "eval_recall": 0.9707142857142858,
+   "eval_runtime": 9.1586,
+   "eval_samples": 1400,
+   "eval_samples_per_second": 152.862,
+   "eval_steps_per_second": 19.108
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6324287db9f024b0b16b5f14604a95fc00868a83aabc2363ae02f2ad9a70148b
+ size 1112220388
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f59925fcb90c92b894cb93e51bb9b4a6105c5c249fe54ce1c704420ac39b81af
+ size 17082756
tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
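Loaded via `AutoTokenizer`, this configuration produces an `XLMRobertaTokenizer` that wraps inputs as `<s> … </s>`, pads with `<pad>` (id 1), and truncates at `model_max_length` = 512, consistent with the model's 514 position embeddings (RoBERTa-style models reserve two extra slots for the padding offset). A small usage sketch, with the repository id assumed as above:

```python
# Sketch of the tokenizer's behavior under this configuration.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("jenslemmens/dutch_genre_classifier")  # assumed repo id

batch = tokenizer(
    ["Een korte recensie.", "Een veel langere tekst over hetzelfde onderwerp."],
    padding=True,     # shorter sequence is padded with <pad> (id 1)
    truncation=True,  # sequences are capped at model_max_length = 512
    return_tensors="pt",
)
# The first token is <s> (bos/cls) and the last non-padding token is </s>.
print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0].tolist()))
```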
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.0,
+   "train_loss": 0.23042542536496113,
+   "train_runtime": 543.7059,
+   "train_samples": 11415,
+   "train_samples_per_second": 41.99,
+   "train_steps_per_second": 2.623
+ }
trainer_state.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.9985984583041345,
+   "eval_steps": 500,
+   "global_step": 1426,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.7,
+       "learning_rate": 2.082089552238806e-05,
+       "loss": 0.481,
+       "step": 500
+     },
+     {
+       "epoch": 1.4,
+       "learning_rate": 9.62686567164179e-06,
+       "loss": 0.1133,
+       "step": 1000
+     },
+     {
+       "epoch": 2.0,
+       "step": 1426,
+       "total_flos": 6003148215168000.0,
+       "train_loss": 0.23042542536496113,
+       "train_runtime": 543.7059,
+       "train_samples_per_second": 41.99,
+       "train_steps_per_second": 2.623
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 1426,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "total_flos": 6003148215168000.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
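The trainer state is internally consistent: 11,415 training samples at per-device batch size 8 give 1,427 batches per epoch; gradient accumulation of 2 yields 713 optimizer steps per epoch, hence 1,426 steps over two epochs (matching `max_steps` and `global_step`, and giving the final `epoch` of 2 − 1/713.5 ≈ 1.9986). The sketch below reproduces that arithmetic together with the linear warmup/decay schedule implied by `warmup_ratio` = 0.06; the Trainer's internal warmup rounding may differ slightly, so the values computed at the logged steps are close to, but not exactly, the logged learning rates.

```python
# Sanity-check of the step counts and the linear schedule in this state file.
import math

samples, batch_size, grad_accum, epochs = 11415, 8, 2, 2
batches_per_epoch = math.ceil(samples / batch_size)   # 1427
steps_per_epoch = batches_per_epoch // grad_accum     # 713
max_steps = steps_per_epoch * epochs                  # 1426, matches global_step
warmup_steps = math.ceil(0.06 * max_steps)            # 86 (assumed rounding)

def linear_lr(step, peak=3e-5):
    """Linear warmup to `peak`, then linear decay to zero at max_steps."""
    if step < warmup_steps:
        return peak * step / warmup_steps
    return peak * max(0.0, (max_steps - step) / (max_steps - warmup_steps))

print(linear_lr(500))   # ~2.07e-05, near the 2.082e-05 logged at step 500
print(linear_lr(1000))  # ~9.54e-06, near the 9.63e-06 logged at step 1000
```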
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:066a10426346ff382cd6123ed886c9943d0e8da28828bcf7b4f96354e584f864
+ size 4856