Commit d70dc0e (1 parent: b6aca04), committed by abhiGOAT

abhiGOAT/DPO

README.md ADDED
@@ -0,0 +1,95 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: TheBloke/OpenHermes-2-Mistral-7B-GPTQ
+ model-index:
+ - name: mistral-dpo
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # mistral-dpo
+
+ This model is a fine-tuned version of [TheBloke/OpenHermes-2-Mistral-7B-GPTQ](https://huggingface.co/TheBloke/OpenHermes-2-Mistral-7B-GPTQ) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.7572
+ - Rewards/chosen: 1.2167
+ - Rewards/rejected: 1.0623
+ - Rewards/accuracies: 0.5096
+ - Rewards/margins: 0.1544
+ - Logps/rejected: -175.8600
+ - Logps/chosen: -185.2850
+ - Logits/rejected: -2.4403
+ - Logits/chosen: -2.5187
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 2
+ - training_steps: 250
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.7021 | 0.0 | 10 | 0.6890 | 0.1146 | 0.0999 | 0.5192 | 0.0147 | -185.4845 | -196.3061 | -2.3759 | -2.4612 |
+ | 0.6841 | 0.0 | 20 | 0.7001 | 0.0160 | 0.0098 | 0.5 | 0.0062 | -186.3854 | -197.2926 | -2.3872 | -2.4717 |
+ | 0.8876 | 0.0 | 30 | 0.7302 | -0.1955 | -0.1884 | 0.5385 | -0.0071 | -188.3676 | -199.4075 | -2.4070 | -2.4917 |
+ | 0.8433 | 0.0 | 40 | 0.7276 | -0.0970 | -0.0927 | 0.5288 | -0.0043 | -187.4101 | -198.4224 | -2.4156 | -2.4975 |
+ | 0.8355 | 0.0 | 50 | 0.7405 | 0.3519 | 0.3619 | 0.4808 | -0.0100 | -182.8643 | -193.9333 | -2.4256 | -2.5051 |
+ | 0.7391 | 0.0 | 60 | 0.7472 | 0.5734 | 0.5592 | 0.5481 | 0.0142 | -180.8914 | -191.7185 | -2.4426 | -2.5190 |
+ | 0.5922 | 0.01 | 70 | 0.7534 | 0.8179 | 0.7898 | 0.5192 | 0.0281 | -178.5854 | -189.2730 | -2.4396 | -2.5167 |
+ | 0.6762 | 0.01 | 80 | 0.7436 | 0.7843 | 0.7179 | 0.5385 | 0.0664 | -179.3046 | -189.6097 | -2.4306 | -2.5082 |
+ | 0.5934 | 0.01 | 90 | 0.7474 | 0.8646 | 0.7935 | 0.5192 | 0.0711 | -178.5482 | -188.8059 | -2.4360 | -2.5117 |
+ | 0.5773 | 0.01 | 100 | 0.7527 | 0.8864 | 0.8060 | 0.5481 | 0.0804 | -178.4233 | -188.5887 | -2.4363 | -2.5114 |
+ | 1.159 | 0.01 | 110 | 0.7513 | 0.7767 | 0.6900 | 0.5385 | 0.0867 | -179.5837 | -189.6852 | -2.4325 | -2.5061 |
+ | 0.5871 | 0.01 | 120 | 0.7514 | 0.6924 | 0.6190 | 0.5385 | 0.0733 | -180.2931 | -190.5286 | -2.4307 | -2.5040 |
+ | 0.6655 | 0.01 | 130 | 0.7515 | 0.5617 | 0.4862 | 0.5385 | 0.0755 | -181.6214 | -191.8357 | -2.4293 | -2.5029 |
+ | 0.5963 | 0.01 | 140 | 0.7489 | 0.4748 | 0.3917 | 0.5481 | 0.0831 | -182.5665 | -192.7043 | -2.4326 | -2.5072 |
+ | 0.7817 | 0.01 | 150 | 0.7466 | 0.5389 | 0.4527 | 0.5288 | 0.0862 | -181.9568 | -192.0635 | -2.4306 | -2.5059 |
+ | 0.7836 | 0.01 | 160 | 0.7399 | 0.5166 | 0.4148 | 0.5288 | 0.1017 | -182.3349 | -192.2867 | -2.4256 | -2.5014 |
+ | 0.6246 | 0.01 | 170 | 0.7478 | 0.9222 | 0.8063 | 0.5 | 0.1159 | -178.4202 | -188.2300 | -2.4449 | -2.5218 |
+ | 0.6159 | 0.01 | 180 | 0.7637 | 1.1539 | 1.0352 | 0.5 | 0.1187 | -176.1314 | -185.9132 | -2.4491 | -2.5259 |
+ | 0.9218 | 0.02 | 190 | 0.7670 | 1.1914 | 1.0684 | 0.4808 | 0.1230 | -175.7993 | -185.5382 | -2.4471 | -2.5233 |
+ | 0.8469 | 0.02 | 200 | 0.7670 | 1.2246 | 1.0991 | 0.5096 | 0.1255 | -175.4922 | -185.2060 | -2.4455 | -2.5220 |
+ | 0.5824 | 0.02 | 210 | 0.7601 | 1.2119 | 1.0773 | 0.5096 | 0.1346 | -175.7105 | -185.3338 | -2.4418 | -2.5188 |
+ | 0.5718 | 0.02 | 220 | 0.7590 | 1.2120 | 1.0736 | 0.5096 | 0.1384 | -175.7473 | -185.3322 | -2.4392 | -2.5168 |
+ | 0.7219 | 0.02 | 230 | 0.7578 | 1.2033 | 1.0583 | 0.4904 | 0.1450 | -175.9007 | -185.4198 | -2.4385 | -2.5165 |
+ | 2.6464 | 0.02 | 240 | 0.7570 | 1.2096 | 1.0575 | 0.5 | 0.1520 | -175.9079 | -185.3567 | -2.4392 | -2.5175 |
+ | 0.8964 | 0.02 | 250 | 0.7572 | 1.2167 | 1.0623 | 0.5096 | 0.1544 | -175.8600 | -185.2850 | -2.4403 | -2.5187 |
+
+
+ ### Framework versions
+
+ - PEFT 0.8.2
+ - Transformers 4.37.0
+ - Pytorch 2.0.1+cu117
+ - Datasets 2.15.0
+ - Tokenizers 0.15.1
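
The hyperparameters above describe a short DPO run (250 steps, LoRA adapter, native AMP), but the card does not include the training script or the preference dataset. Below is a minimal sketch of how a comparable run could be set up with `trl`'s `DPOTrainer`. The dataset file, `beta`, sequence-length limits, and logging cadence are assumptions; the LoRA settings mirror `adapter_config.json` further down; and the sketch assumes a `trl` release contemporary with Transformers 4.37 / PEFT 0.8.2 (roughly trl 0.7), not the author's actual code.

```python
# Sketch only: reproduces the listed hyperparameters, not the original script.
# Requires transformers, peft, trl, datasets, and auto-gptq/optimum for the GPTQ base.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

BASE = "TheBloke/OpenHermes-2-Mistral-7B-GPTQ"

tokenizer = AutoTokenizer.from_pretrained(BASE)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(BASE, device_map="auto")
model.config.use_cache = False  # caching is not useful while training

# LoRA settings taken from adapter_config.json in this commit.
peft_config = LoraConfig(
    r=4,
    lora_alpha=6,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

# Placeholder preference data (hypothetical file): any split with string columns
# "prompt", "chosen", "rejected" works. The dataset actually used is not documented.
dpo_dataset = load_dataset("json", data_files="dpo_pairs.jsonl", split="train")

args = TrainingArguments(
    output_dir="mistral-dpo",
    per_device_train_batch_size=1,
    per_device_eval_batch_size=8,
    learning_rate=2e-4,            # 0.0002
    lr_scheduler_type="linear",
    warmup_steps=2,
    max_steps=250,
    seed=42,
    fp16=True,                     # "Native AMP" mixed precision
    logging_steps=10,              # assumption; the table above reports every 10 steps
    optim="adamw_torch",           # Adam with betas=(0.9, 0.999), eps=1e-8 by default
)

trainer = DPOTrainer(
    model,
    ref_model=None,                # with a PEFT adapter, the frozen base acts as the reference
    args=args,
    train_dataset=dpo_dataset,
    tokenizer=tokenizer,
    peft_config=peft_config,
    beta=0.1,                      # assumption; the card does not record beta
    max_prompt_length=512,         # assumption
    max_length=1024,               # assumption
)
trainer.train()
trainer.save_model("mistral-dpo")
```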
adapter_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "TheBloke/OpenHermes-2-Mistral-7B-GPTQ",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 6,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
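
For reference, the adapter configuration above corresponds to a `peft.LoraConfig` built roughly as follows; this is a sketch of an equivalent construction, not code taken from this repository.

```python
from peft import LoraConfig

# Mirrors adapter_config.json: rank-4 LoRA on the attention q/v projections only.
lora_config = LoraConfig(
    r=4,
    lora_alpha=6,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```

With rank 4 applied only to `q_proj` and `v_proj`, the adapter holds roughly 1.7 M trainable parameters, which lines up with the ~6.8 MB `adapter_model.safetensors` added below (float32 weights).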
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5880079c35bb58b2ec88fe21db37bb561970c0bde289d8e9a0290ed43b78c788
+ size 6832600
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|im_end|>": 32000,
+   "<|im_start|>": 32001
+ }
runs/Feb14_06-57-05_4980eb37d5b1/events.out.tfevents.1707893901.4980eb37d5b1.34.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f29934f0dc08fed34ca7afe2212b3772c1e36a04d6dc8e6862fa4e108a3883d8
+ size 40089
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32001": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "trust_remote_code": false,
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true,
+   "use_fast": true
+ }
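
The tokenizer configuration above keeps OpenHermes' ChatML markup: `<|im_start|>` and `<|im_end|>` are registered as added tokens and `<|im_end|>` serves as the EOS token. Below is a minimal inference sketch that loads this adapter on top of the GPTQ base model; the prompt and generation settings are illustrative assumptions, not part of this repository.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE = "TheBloke/OpenHermes-2-Mistral-7B-GPTQ"
ADAPTER = "abhiGOAT/DPO"  # this repository

# Tokenizer files come from this commit; loading the GPTQ base needs auto-gptq/optimum.
tokenizer = AutoTokenizer.from_pretrained(ADAPTER)
model = AutoModelForCausalLM.from_pretrained(BASE, device_map="auto")
model = PeftModel.from_pretrained(model, ADAPTER)
model.eval()

# ChatML prompt assembled from the special tokens defined above.
prompt = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nSummarize DPO in one sentence.<|im_end|>\n"
    "<|im_start|>assistant\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(
        **inputs,
        max_new_tokens=128,
        do_sample=True,
        temperature=0.7,
        eos_token_id=tokenizer.eos_token_id,  # <|im_end|>, per tokenizer_config.json
        pad_token_id=tokenizer.pad_token_id,  # </s>, per special_tokens_map.json
    )
# Print only the newly generated continuation, without the prompt tokens.
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```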
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da80bad236a74f2a70a455dbc801ee413eda02f564728639c71288f4a6528298
+ size 4219