nguyenkhoa committed on
Commit 19947eb
1 Parent(s): 71f1073

Model save

README.md ADDED
@@ -0,0 +1,47 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: LongBartRecipe1M_with_title_v3
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # LongBartRecipe1M_with_title_v3
+
+ This model was trained from scratch on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1
+ - mixed_precision_training: Native AMP
+
+ ### Framework versions
+
+ - Transformers 4.41.2
+ - Pytorch 2.1.2
+ - Datasets 2.19.2
+ - Tokenizers 0.19.1
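
The hyperparameters listed in this card map directly onto `Seq2SeqTrainingArguments`. A minimal sketch of the equivalent configuration; the `output_dir` and the per-device interpretation of the batch sizes are assumptions, since the training script is not part of this commit:

```python
from transformers import Seq2SeqTrainingArguments

# Sketch of the card's listed hyperparameters; output_dir is an assumption.
training_args = Seq2SeqTrainingArguments(
    output_dir="LongBartRecipe1M_with_title_v3",  # assumed output path
    learning_rate=5e-05,
    per_device_train_batch_size=8,   # "train_batch_size: 8" read as per-device
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,                  # Adam with betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    lr_scheduler_type="linear",
    num_train_epochs=1,
    fp16=True,                       # "Native AMP" mixed-precision training
)
```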
config.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "_name_or_path": "Longformer_bart_model",
+   "activation_dropout": 0.1,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "LongformerEncoderBARTDecoderForConditionalGeneration"
+   ],
+   "attention_dilation": [
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1
+   ],
+   "attention_dropout": 0.1,
+   "attention_mode": "sliding_chunks",
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "autoregressive": false,
+   "bos_token_id": 0,
+   "classif_dropout": 0.1,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "forced_bos_token_id": 0,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_decoder_position_embeddings": 1024,
+   "max_encoder_position_embeddings": 1024,
+   "max_position_embeddings": 1024,
+   "max_seq_len": 1024,
+   "model_type": "bart",
+   "no_repeat_ngram_size": 3,
+   "normalize_before": false,
+   "num_beams": 4,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "task_specific_params": {
+     "summarization": {
+       "length_penalty": 1.0,
+       "max_length": 128,
+       "min_length": 12,
+       "num_beams": 4
+     },
+     "summarization_cnn": {
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "num_beams": 4
+     },
+     "summarization_xsum": {
+       "length_penalty": 1.0,
+       "max_length": 62,
+       "min_length": 11,
+       "num_beams": 6
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.2",
+   "use_cache": true,
+   "vocab_size": 50265
+ }
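
The config describes a BART-style encoder-decoder (12 + 12 layers, `d_model` 1024) whose encoder uses Longformer-style sliding-window attention with a 512-token window per layer. A minimal sketch of inspecting it with `AutoConfig`, assuming the repo id `nguyenkhoa/LongBartRecipe1M_with_title_v3`; note that `architectures` names a custom class that does not ship with `transformers`, so instantiating the model itself likely requires the author's own code:

```python
from transformers import AutoConfig

# Repo id inferred from the commit author and model name (an assumption).
config = AutoConfig.from_pretrained("nguyenkhoa/LongBartRecipe1M_with_title_v3")

print(config.model_type)                              # "bart"
print(config.encoder_layers, config.decoder_layers)   # 12 12
print(config.attention_window[:3])                    # [512, 512, 512], per encoder layer
print(config.max_position_embeddings)                 # 1024
```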
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "forced_bos_token_id": 0,
+   "forced_eos_token_id": 2,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 4,
+   "pad_token_id": 1,
+   "transformers_version": "4.41.2"
+ }
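
These generation defaults (4-beam search, trigram blocking, early stopping) can be loaded independently of the model weights. A minimal sketch, again assuming the repo id:

```python
from transformers import GenerationConfig

# Repo id inferred from the commit metadata (an assumption).
gen_config = GenerationConfig.from_pretrained("nguyenkhoa/LongBartRecipe1M_with_title_v3")

print(gen_config.num_beams)             # 4
print(gen_config.no_repeat_ngram_size)  # 3
print(gen_config.early_stopping)        # True
```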
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:774639982c36ff16e896adc9117932bd145d46a6726c8023dbfca80b28f52216
+ size 1776581308
runs/Aug23_08-58-15_5f18c8752608/events.out.tfevents.1724403495.5f18c8752608.24.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e239b72c1816ceff6d3f49bc8911a19fb65a9ab758c375945c420b2990577a22
+ size 7797
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "model_max_length": 1024,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "BartTokenizer",
+   "trim_offsets": true,
+   "truncated": true,
+   "unk_token": "<unk>"
+ }
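
The tokenizer files in this commit define a standard `BartTokenizer` (byte-level BPE, vocab size 50265 per the config) capped at 1024 tokens. A minimal usage sketch; the repo id and the sample recipe-title input are assumptions:

```python
from transformers import AutoTokenizer

# Repo id inferred from the commit metadata (an assumption).
tokenizer = AutoTokenizer.from_pretrained("nguyenkhoa/LongBartRecipe1M_with_title_v3")

# Hypothetical recipe-title input; truncation clips to model_max_length (1024).
enc = tokenizer("Title: Banana bread", truncation=True, return_tensors="pt")
print(enc["input_ids"].shape)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"][0]))
```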
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ffe451b05d8a0ef8485ad5d8c8d389899c1576ccb2846fad85fe23d6b0683fb
+ size 5176
vocab.json ADDED
The diff for this file is too large to render. See raw diff