rishavranaut committed
Commit: fbb9e1d
1 Parent(s): 4b71894

End of training
Browse files
- README.md +10 -15
- config.json +0 -8
- logs/events.out.tfevents.1722715035.iit-p +3 -0
- logs/events.out.tfevents.1722715551.iit-p +3 -0
- logs/events.out.tfevents.1722715603.iit-p +3 -0
- logs/events.out.tfevents.1722717723.iit-p +3 -0
- model.safetensors +1 -1
- runs/Aug04_01-13-08_iit-p/events.out.tfevents.1722714191.iit-p +3 -0
- runs/Jul28_08-51-33_iit-p/events.out.tfevents.1722143137.iit-p +3 -0
- training_args.bin +1 -1
README.md
CHANGED
@@ -3,8 +3,6 @@ license: apache-2.0
 base_model: google/flan-t5-base
 tags:
 - generated_from_trainer
-metrics:
-- accuracy
 model-index:
 - name: flanT5_base_Task2_Fact_updates
   results: []
@@ -17,11 +15,7 @@ should probably proofread and complete it, then remove this comment. -->

 This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Accuracy: 0.9102
-- Weighted F1: 0.9102
-- Micro F1: 0.9102
-- Macro F1: 0.9102
+- Loss: 0.5533

 ## Model description

@@ -40,23 +34,24 @@ More information needed
 ### Training hyperparameters

 The following hyperparameters were used during training:
-- learning_rate:
+- learning_rate: 1e-05
 - train_batch_size: 1
 - eval_batch_size: 1
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 500
 - num_epochs: 5

 ### Training results

-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:-----:|:-----:|:---------------:|
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:-----:|:---------------:|
+| 0.6835 | 1.0 | 5793 | 0.8141 |
+| 0.0034 | 2.0 | 11586 | 0.5511 |
+| 0.0003 | 3.0 | 17379 | 0.5200 |
+| 1.3605 | 4.0 | 23172 | 0.5584 |
+| 0.0336 | 5.0 | 28965 | 0.5533 |


 ### Framework versions
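For reference, the hyperparameters listed in the updated README map directly onto standard `transformers` `TrainingArguments` fields. The following is a minimal sketch under that assumption, not the repository's actual training script; `output_dir` is a placeholder.

```python
# Minimal sketch: the README's hyperparameters expressed as TrainingArguments.
# Not taken from this repository's training code; output_dir is an assumed placeholder.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="flanT5_base_Task2_Fact_updates",  # assumption
    learning_rate=1e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=5,
)
```

The step counts in the results table (5793, 11586, ..., 28965) grow by 5793 per epoch, consistent with one optimizer step per training example at batch size 1.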
config.json
CHANGED
@@ -12,17 +12,9 @@
   "dropout_rate": 0.1,
   "eos_token_id": 1,
   "feed_forward_proj": "gated-gelu",
-  "id2label": {
-    "0": "0",
-    "1": "1"
-  },
   "initializer_factor": 1.0,
   "is_encoder_decoder": true,
   "is_gated_act": true,
-  "label2id": {
-    "0": 0,
-    "1": 1
-  },
   "layer_norm_epsilon": 1e-06,
   "model_type": "t5",
   "n_positions": 512,
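This config.json change drops the `id2label`/`label2id` mappings that marked the config as a two-class setup. If downstream code still expects those attributes, they can be reinstated at load time through the generic config kwargs; a minimal sketch, assuming the standard `transformers` API (the base-model id is used here only as an example):

```python
# Minimal sketch (not from this repository): restoring the removed binary label
# mappings via PretrainedConfig keyword arguments at load time.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "google/flan-t5-base",          # any T5 config works the same way
    id2label={0: "0", 1: "1"},      # mapping removed by this commit
    label2id={"0": 0, "1": 1},
)
print(config.num_labels)  # 2, inferred from id2label
```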
logs/events.out.tfevents.1722715035.iit-p
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31757d55bf2212664c3303669ebff3ba4d4786fc01279e7d9fdc2c731c742c9b
+size 67514
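This file, like the other added log and run files below, is stored through Git LFS, so the diff records only a three-line pointer: the LFS spec version, the sha256 `oid` of the content, and its `size` in bytes. A minimal sketch, with an assumed local path, for checking a downloaded copy against its pointer:

```python
# Minimal sketch: verify a locally downloaded artifact against the sha256 oid and
# byte size recorded in its Git LFS pointer. The path below is an assumption.
import hashlib
import os

path = "logs/events.out.tfevents.1722715035.iit-p"
expected_oid = "31757d55bf2212664c3303669ebff3ba4d4786fc01279e7d9fdc2c731c742c9b"
expected_size = 67514

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
```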
logs/events.out.tfevents.1722715551.iit-p
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60e3d58eb35999d86277c21e2a9308bd94824471f66cafe01998bece060dbd05
+size 10085
logs/events.out.tfevents.1722715603.iit-p
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d301fb7456b04ecbee3cf7f1e07145f74d66c0ff3a4367ef96eaac806ebe475a
+size 262175
logs/events.out.tfevents.1722717723.iit-p
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1db635e0accbbee49dee477ea3a05389f74eb63d9c3c46f0e8cdd75966902e5
+size 623508
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:742b72d82c54b7e6018d2a0c3ce0e43acb75652a435899d9e25e627c840bc9e7
 size 894020048
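The `model.safetensors` pointer keeps the same 894 MB size but a new `oid`, i.e. the weights themselves were replaced by this training run. A minimal usage sketch follows; the repository id is an assumption inferred from the committer name and the README's model name, and the input string is only a placeholder since the task's prompt format is not documented in this diff:

```python
# Minimal usage sketch. The repo id is an assumption (committer + model-index name);
# the prompt is a placeholder, not the task's documented input format.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

repo_id = "rishavranaut/flanT5_base_Task2_Fact_updates"  # assumption
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)   # loads model.safetensors

inputs = tokenizer("Is the following claim supported? ...", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```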
runs/Aug04_01-13-08_iit-p/events.out.tfevents.1722714191.iit-p
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc96dd298a722b6520333c97f6962d94641ba0d54fed61cae22fb75936f5a36
+size 5850
runs/Jul28_08-51-33_iit-p/events.out.tfevents.1722143137.iit-p
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7410511b43b07b378759c030f286e70cfb69256410785c96bfd6093c83f9d3c4
+size 531
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b8089e8b6d4026de6b77ad7801a3b0ddabb06ff22b648649301724a06a2ecf64
 size 5176
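`training_args.bin` is the `TrainingArguments` object that the `Trainer` serializes with `torch.save` next to the weights, so the exact run configuration can be inspected from it. A minimal sketch, assuming the file has been downloaded locally and `transformers` is importable for unpickling:

```python
# Minimal sketch: inspect the pickled TrainingArguments saved by the Trainer.
# weights_only=False is needed because this is a full pickled Python object.
import torch

args = torch.load("training_args.bin", weights_only=False)  # assumed local path
print(args.learning_rate, args.num_train_epochs, args.warmup_steps)
```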