willtensora committed
Commit 4321b87 · verified · 1 Parent(s): c80578e

Training in progress, step 3, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -10,21 +10,21 @@
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
- "lora_alpha": 8,
- "lora_dropout": 0.1,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
- "r": 4,
+ "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+ "down_proj",
+ "up_proj",
  "q_proj",
  "gate_proj",
- "up_proj",
  "k_proj",
- "down_proj",
  "v_proj",
  "o_proj"
  ],
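For reference, the adapter settings after this change map onto a PEFT LoraConfig roughly as sketched below. Only the values visible in the adapter_config.json diff above are taken from the commit; the task type is an assumption.

from peft import LoraConfig

# Sketch of the adapter settings after this commit; values mirror the
# adapter_config.json diff above. task_type is an assumption, not from the diff.
lora_config = LoraConfig(
    r=8,                     # previously 4
    lora_alpha=16,           # previously 8
    lora_dropout=0.05,       # previously 0.1
    target_modules=[
        "down_proj", "up_proj", "q_proj", "gate_proj",
        "k_proj", "v_proj", "o_proj",
    ],
    task_type="CAUSAL_LM",   # assumption: typical for these projection modules
)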
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c1c29d1f5649df700edb2931a4ba65802f1d7aede9c11934423e1b0801b9fcb
- size 9048
+ oid sha256:c599b75a981b0d9f0dc6a0e91aa387a26e70ef6d36d76cb04bce8ce82a092b48
+ size 14696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a42d954b1a09f470dff3b078b5597b5e7417e591f49cdff866f5e78474794bea
- size 28134
+ oid sha256:a58ebf1b77ea2d04d0501506dc56060e091959c460d798517cb5ca27f6473af7
+ size 39398
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:89dbad7a5f4eb47d4580585d91b020d1a7f5dc2fd4d54175561c1eb48d2175e4
+ oid sha256:464e6cdff3f8548ba79ace3859c72e6b06d1e045e69932eb75c584ad0e8918c8
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e6da16eefab22daee3fb0999c7651be192feb36b73810bf419e9165c7023ec02
+ oid sha256:ffd8c58e5d02492554dbaa495f8cf80dff41fabc0e1288cb2fd18a103d402219
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,34 +1,56 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0002666666666666667,
- "eval_steps": 500,
- "global_step": 1,
+ "epoch": 0.0016825574873808188,
+ "eval_steps": 3,
+ "global_step": 3,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.0002666666666666667,
- "grad_norm": 0.017132652923464775,
- "learning_rate": 0.0,
- "loss": 11.9308,
+ "epoch": 0.0005608524957936063,
+ "grad_norm": 0.012027375400066376,
+ "learning_rate": 2e-05,
+ "loss": 11.9315,
  "step": 1
  },
  {
- "epoch": 0.0002666666666666667,
- "eval_loss": 11.933894157409668,
- "eval_runtime": 0.0614,
- "eval_samples_per_second": 260.547,
- "eval_steps_per_second": 65.137,
+ "epoch": 0.0005608524957936063,
+ "eval_loss": 11.931306838989258,
+ "eval_runtime": 2.6701,
+ "eval_samples_per_second": 281.261,
+ "eval_steps_per_second": 140.818,
  "step": 1
+ },
+ {
+ "epoch": 0.0011217049915872126,
+ "grad_norm": 0.008168625645339489,
+ "learning_rate": 4e-05,
+ "loss": 11.9304,
+ "step": 2
+ },
+ {
+ "epoch": 0.0016825574873808188,
+ "grad_norm": 0.014232893474400043,
+ "learning_rate": 6e-05,
+ "loss": 11.9319,
+ "step": 3
+ },
+ {
+ "epoch": 0.0016825574873808188,
+ "eval_loss": 11.931304931640625,
+ "eval_runtime": 2.7097,
+ "eval_samples_per_second": 277.154,
+ "eval_steps_per_second": 138.761,
+ "step": 3
  }
  ],
  "logging_steps": 1,
- "max_steps": 1,
+ "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
- "save_steps": 500,
+ "save_steps": 3,
  "stateful_callbacks": {
  "TrainerControl": {
  "args": {
@@ -36,13 +58,13 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": true
+ "should_training_stop": false
  },
  "attributes": {}
  }
  },
- "total_flos": 10444800.0,
- "train_batch_size": 4,
+ "total_flos": 384024576.0,
+ "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
  }
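The trainer state above implies the run's core Trainer settings. A minimal sketch of matching TrainingArguments follows; values not present in this diff (output directory, base learning rate, warmup length) are assumptions, chosen only to be consistent with the logged learning rates of 2e-05, 4e-05, and 6e-05 at steps 1 through 3.

from transformers import TrainingArguments

# Sketch of arguments consistent with the trainer_state.json diff above.
# output_dir, learning_rate, and warmup_steps are assumptions; the rest
# mirrors values visible in the diff.
args = TrainingArguments(
    output_dir="last-checkpoint",    # hypothetical path
    per_device_train_batch_size=2,   # "train_batch_size": 2
    num_train_epochs=1,              # "num_train_epochs": 1
    max_steps=10,                    # "max_steps": 10
    logging_steps=1,                 # "logging_steps": 1
    eval_strategy="steps",           # step-based eval (transformers >= 4.41 name)
    eval_steps=3,                    # "eval_steps": 3
    save_steps=3,                    # "save_steps": 3, hence this step-3 checkpoint
    learning_rate=2e-4,              # assumption: linear warmup over 10 steps
    warmup_steps=10,                 #   would give 2e-05, 4e-05, 6e-05 at steps 1-3
)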
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5f30b79b60b8c46ca02ae421a50ada3e80027f9f26343b71ced3942fb88eb24b
+ oid sha256:dae77c6881efffb1525ab27d15f41925709b4eb57600c98d05461f542cc3b806
  size 6776
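Taken together, the files updated in this commit (adapter weights, optimizer.pt, scheduler.pt, rng_state.pth, trainer_state.json, training_args.bin) are the standard contents of a Hugging Face Trainer checkpoint, so the run can be resumed from this directory. A rough sketch is below; the model, arguments, and datasets are placeholders supplied by the caller, not defined in this repository.

from transformers import Trainer

def resume_run(model, args, train_dataset, eval_dataset=None):
    """Sketch: resume training from the step-3 checkpoint committed here.

    Only the checkpoint path refers to files in this repository; everything
    else is provided by the caller.
    """
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    trainer.train(resume_from_checkpoint="last-checkpoint")
    return trainer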