Henil1 committed on
Commit
4a12edd
1 Parent(s): 0f02056

Training in progress epoch 0

Browse files
Files changed (4) hide show
  1. README.md +6 -7
  2. config.json +2 -1
  3. generation_config.json +1 -1
  4. tf_model.h5 +1 -1
README.md CHANGED
@@ -15,9 +15,9 @@ probably proofread and complete it, then remove this comment. -->
15
 
16
  This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
- - Train Loss: 2.0076
19
- - Validation Loss: 1.6043
20
- - Epoch: 1
21
 
22
  ## Model description
23
 
@@ -36,20 +36,19 @@ More information needed
36
  ### Training hyperparameters
37
 
38
  The following hyperparameters were used during training:
39
- - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5.6e-05, 'decay_steps': 13806, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
40
  - training_precision: mixed_float16
41
 
42
  ### Training results
43
 
44
  | Train Loss | Validation Loss | Epoch |
45
  |:----------:|:---------------:|:-----:|
46
- | 3.3038 | 1.7895 | 0 |
47
- | 2.0076 | 1.6043 | 1 |
48
 
49
 
50
  ### Framework versions
51
 
52
- - Transformers 4.31.0
53
  - TensorFlow 2.12.0
54
  - Datasets 2.14.4
55
  - Tokenizers 0.13.3
 
15
 
16
  This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
+ - Train Loss: 1.8179
19
+ - Validation Loss: 1.3050
20
+ - Epoch: 0
21
 
22
  ## Model description
23
 
 
36
  ### Training hyperparameters
37
 
38
  The following hyperparameters were used during training:
39
+ - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
40
  - training_precision: mixed_float16
41
 
42
  ### Training results
43
 
44
  | Train Loss | Validation Loss | Epoch |
45
  |:----------:|:---------------:|:-----:|
46
+ | 1.8179 | 1.3050 | 0 |
 
47
 
48
 
49
  ### Framework versions
50
 
51
+ - Transformers 4.32.0
52
  - TensorFlow 2.12.0
53
  - Datasets 2.14.4
54
  - Tokenizers 0.13.3
config.json CHANGED
@@ -3,6 +3,7 @@
3
  "architectures": [
4
  "MT5ForConditionalGeneration"
5
  ],
 
6
  "d_ff": 1024,
7
  "d_kv": 64,
8
  "d_model": 512,
@@ -24,7 +25,7 @@
24
  "relative_attention_num_buckets": 32,
25
  "tie_word_embeddings": false,
26
  "tokenizer_class": "T5Tokenizer",
27
- "transformers_version": "4.31.0",
28
  "use_cache": true,
29
  "vocab_size": 250112
30
  }
 
3
  "architectures": [
4
  "MT5ForConditionalGeneration"
5
  ],
6
+ "classifier_dropout": 0.0,
7
  "d_ff": 1024,
8
  "d_kv": 64,
9
  "d_model": 512,
 
25
  "relative_attention_num_buckets": 32,
26
  "tie_word_embeddings": false,
27
  "tokenizer_class": "T5Tokenizer",
28
+ "transformers_version": "4.32.0",
29
  "use_cache": true,
30
  "vocab_size": 250112
31
  }
generation_config.json CHANGED
@@ -3,5 +3,5 @@
3
  "decoder_start_token_id": 0,
4
  "eos_token_id": 1,
5
  "pad_token_id": 0,
6
- "transformers_version": "4.31.0"
7
  }
 
3
  "decoder_start_token_id": 0,
4
  "eos_token_id": 1,
5
  "pad_token_id": 0,
6
+ "transformers_version": "4.32.0"
7
  }
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0c28691f23550d3ebbd6e5be7ff69f578c1f76948ab52149bc28cb049c2e3ff4
3
  size 2225556280
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e197ca46a314076ce4512e4c990229f4f877bdda75b9bd168e5bc41f8a5f1dd1
3
  size 2225556280