lenatr99 committed on
Commit
b2c677a
1 Parent(s): 39a975a

lora_fine_tuned_cb

Browse files
README.md CHANGED
@@ -1,9 +1,9 @@
1
  ---
2
- license: cc-by-4.0
3
  library_name: peft
4
  tags:
5
  - generated_from_trainer
6
- base_model: EMBEDDIA/crosloengual-bert
7
  metrics:
8
  - accuracy
9
  - f1
@@ -17,9 +17,9 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  # lora_fine_tuned_cb
19
 
20
- This model is a fine-tuned version of [EMBEDDIA/crosloengual-bert](https://huggingface.co/EMBEDDIA/crosloengual-bert) on an unknown dataset.
21
  It achieves the following results on the evaluation set:
22
- - Loss: 1.4348
23
  - Accuracy: 0.3182
24
  - F1: 0.1536
25
 
@@ -40,7 +40,7 @@ More information needed
40
  ### Training hyperparameters
41
 
42
  The following hyperparameters were used during training:
43
- - learning_rate: 0.003
44
  - train_batch_size: 8
45
  - eval_batch_size: 8
46
  - seed: 42
@@ -52,14 +52,14 @@ The following hyperparameters were used during training:
52
 
53
  | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
54
  |:-------------:|:-------:|:----:|:---------------:|:--------:|:------:|
55
- | 0.9795 | 3.5714 | 50 | 2.4262 | 0.3182 | 0.1536 |
56
- | 1.0466 | 7.1429 | 100 | 1.5536 | 0.3182 | 0.1536 |
57
- | 0.7941 | 10.7143 | 150 | 1.7159 | 0.3182 | 0.1536 |
58
- | 0.7834 | 14.2857 | 200 | 1.7813 | 0.3182 | 0.1536 |
59
- | 0.8008 | 17.8571 | 250 | 1.6710 | 0.3182 | 0.1536 |
60
- | 0.767 | 21.4286 | 300 | 1.4292 | 0.3182 | 0.1536 |
61
- | 0.7215 | 25.0 | 350 | 1.4617 | 0.3182 | 0.1536 |
62
- | 0.7297 | 28.5714 | 400 | 1.4348 | 0.3182 | 0.1536 |
63
 
64
 
65
  ### Framework versions
 
1
  ---
2
+ license: apache-2.0
3
  library_name: peft
4
  tags:
5
  - generated_from_trainer
6
+ base_model: google-bert/bert-base-uncased
7
  metrics:
8
  - accuracy
9
  - f1
 
17
 
18
  # lora_fine_tuned_cb
19
 
20
+ This model is a fine-tuned version of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) on an unknown dataset.
21
  It achieves the following results on the evaluation set:
22
+ - Loss: 1.4089
23
  - Accuracy: 0.3182
24
  - F1: 0.1536
25
 
 
40
  ### Training hyperparameters
41
 
42
  The following hyperparameters were used during training:
43
+ - learning_rate: 2e-05
44
  - train_batch_size: 8
45
  - eval_batch_size: 8
46
  - seed: 42
 
52
 
53
  | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
54
  |:-------------:|:-------:|:----:|:---------------:|:--------:|:------:|
55
+ | 0.9467 | 3.5714 | 50 | 1.1690 | 0.3182 | 0.1536 |
56
+ | 0.7755 | 7.1429 | 100 | 1.2983 | 0.3182 | 0.1536 |
57
+ | 0.7396 | 10.7143 | 150 | 1.3709 | 0.3182 | 0.1536 |
58
+ | 0.6894 | 14.2857 | 200 | 1.3939 | 0.3182 | 0.1536 |
59
+ | 0.7253 | 17.8571 | 250 | 1.4084 | 0.3182 | 0.1536 |
60
+ | 0.7187 | 21.4286 | 300 | 1.4133 | 0.3182 | 0.1536 |
61
+ | 0.6998 | 25.0 | 350 | 1.4096 | 0.3182 | 0.1536 |
62
+ | 0.7152 | 28.5714 | 400 | 1.4089 | 0.3182 | 0.1536 |
63
 
64
 
65
  ### Framework versions
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
- "base_model_name_or_path": "EMBEDDIA/crosloengual-bert",
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
 
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
+ "base_model_name_or_path": "google-bert/bert-base-uncased",
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:14c09e26f590c5c76ad1f1091dd17bc4604d7d9a1673f08aa1d5fedc1ce5ae24
3
  size 2375492
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:264b1faec628cc0e5ebec097981d2916d11f12c33b737ff1533ab01a2fe9a9cc
3
  size 2375492
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
8
  "single_word": false,
9
  "special": true
10
  },
11
- "102": {
12
  "content": "[UNK]",
13
  "lstrip": false,
14
  "normalized": false,
@@ -16,7 +16,7 @@
16
  "single_word": false,
17
  "special": true
18
  },
19
- "103": {
20
  "content": "[CLS]",
21
  "lstrip": false,
22
  "normalized": false,
@@ -24,7 +24,7 @@
24
  "single_word": false,
25
  "special": true
26
  },
27
- "104": {
28
  "content": "[SEP]",
29
  "lstrip": false,
30
  "normalized": false,
@@ -32,7 +32,7 @@
32
  "single_word": false,
33
  "special": true
34
  },
35
- "105": {
36
  "content": "[MASK]",
37
  "lstrip": false,
38
  "normalized": false,
@@ -43,12 +43,9 @@
43
  },
44
  "clean_up_tokenization_spaces": true,
45
  "cls_token": "[CLS]",
46
- "do_basic_tokenize": true,
47
- "do_lower_case": false,
48
  "mask_token": "[MASK]",
49
- "max_len": 512,
50
  "model_max_length": 512,
51
- "never_split": null,
52
  "pad_token": "[PAD]",
53
  "sep_token": "[SEP]",
54
  "strip_accents": null,
 
8
  "single_word": false,
9
  "special": true
10
  },
11
+ "100": {
12
  "content": "[UNK]",
13
  "lstrip": false,
14
  "normalized": false,
 
16
  "single_word": false,
17
  "special": true
18
  },
19
+ "101": {
20
  "content": "[CLS]",
21
  "lstrip": false,
22
  "normalized": false,
 
24
  "single_word": false,
25
  "special": true
26
  },
27
+ "102": {
28
  "content": "[SEP]",
29
  "lstrip": false,
30
  "normalized": false,
 
32
  "single_word": false,
33
  "special": true
34
  },
35
+ "103": {
36
  "content": "[MASK]",
37
  "lstrip": false,
38
  "normalized": false,
 
43
  },
44
  "clean_up_tokenization_spaces": true,
45
  "cls_token": "[CLS]",
46
+ "do_lower_case": true,
 
47
  "mask_token": "[MASK]",
 
48
  "model_max_length": 512,
 
49
  "pad_token": "[PAD]",
50
  "sep_token": "[SEP]",
51
  "strip_accents": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3ef17a22499ff8b582ca7328b84614ae061ad2c4caa550206c0e234fdf27d149
3
  size 4984
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57f03c056c47a6e726c0d57b3f1d49fee5080ea73a94e345e509780fe338fdf8
3
  size 4984
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff