Commit 11a5ca6 (verified) by tuanna08go
Parent: 1ab557b

Training in progress, step 1, checkpoint
last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "gate_proj",
-    "up_proj",
+    "down_proj",
     "k_proj",
-    "o_proj",
+    "gate_proj",
+    "q_proj",
     "v_proj",
-    "down_proj"
+    "up_proj",
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f6b60fcae97ff3254a518b14da04f37124bdf1ae476f1994c2a8cd7567e14be
+oid sha256:0add5290396cf0ac4f59ce067dd503a5f8ea88edf4037194f9da4dfe8655af63
 size 80792096
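The pointer above records only the hash and size of the adapter weights; the file itself lives in LFS storage. A small, generic sketch for checking a pulled copy against the new oid (assuming the file has already been fetched) is:

# Sketch: verify a pulled LFS file against the sha256 recorded in its pointer.
import hashlib

expected = "0add5290396cf0ac4f59ce067dd503a5f8ea88edf4037194f9da4dfe8655af63"

h = hashlib.sha256()
with open("last-checkpoint/adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("match" if h.hexdigest() == expected else "mismatch")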
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ddd06a1d51d594964d2231d0a9f1dcd7ce979116db8da9b5de5ff823411dcb44
+oid sha256:1360b214dc305ce0c7fca0fdb3feecc9d03ca3af798b10af45b117b0f4bff123
 size 41459700
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dec5fe9a4e7d0b0d0b5890a54362458d63f1aa0cd3805572b7c904b144470602
+oid sha256:d9673d22f265ed23c9ac8fe3534732259b5ca12d0692ce0ea4ceef5963b1b4fd
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
+oid sha256:ae751897b8e87ff08962a91d1d3485984775a96aa89e29a1caac3d6f449228f7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,102 +1,27 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.052144440099074436,
-  "eval_steps": 10,
-  "global_step": 50,
+  "epoch": 6.518267444513249e-05,
+  "eval_steps": 1,
+  "global_step": 1,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.0010428888019814887,
+      "epoch": 6.518267444513249e-05,
       "eval_loss": NaN,
-      "eval_runtime": 330.4614,
-      "eval_samples_per_second": 19.548,
-      "eval_steps_per_second": 2.445,
+      "eval_runtime": 378.354,
+      "eval_samples_per_second": 17.074,
+      "eval_steps_per_second": 8.537,
       "step": 1
-    },
-    {
-      "epoch": 0.010428888019814887,
-      "grad_norm": NaN,
-      "learning_rate": 9.330127018922194e-05,
-      "loss": 0.0,
-      "step": 10
-    },
-    {
-      "epoch": 0.010428888019814887,
-      "eval_loss": NaN,
-      "eval_runtime": 329.3613,
-      "eval_samples_per_second": 19.614,
-      "eval_steps_per_second": 2.453,
-      "step": 10
-    },
-    {
-      "epoch": 0.020857776039629773,
-      "grad_norm": NaN,
-      "learning_rate": 6.91341716182545e-05,
-      "loss": 0.0,
-      "step": 20
-    },
-    {
-      "epoch": 0.020857776039629773,
-      "eval_loss": NaN,
-      "eval_runtime": 329.7608,
-      "eval_samples_per_second": 19.59,
-      "eval_steps_per_second": 2.45,
-      "step": 20
-    },
-    {
-      "epoch": 0.03128666405944466,
-      "grad_norm": NaN,
-      "learning_rate": 3.705904774487396e-05,
-      "loss": 0.0,
-      "step": 30
-    },
-    {
-      "epoch": 0.03128666405944466,
-      "eval_loss": NaN,
-      "eval_runtime": 330.0198,
-      "eval_samples_per_second": 19.575,
-      "eval_steps_per_second": 2.448,
-      "step": 30
-    },
-    {
-      "epoch": 0.041715552079259546,
-      "grad_norm": NaN,
-      "learning_rate": 1.0332332985438248e-05,
-      "loss": 0.0,
-      "step": 40
-    },
-    {
-      "epoch": 0.041715552079259546,
-      "eval_loss": NaN,
-      "eval_runtime": 329.6182,
-      "eval_samples_per_second": 19.598,
-      "eval_steps_per_second": 2.451,
-      "step": 40
-    },
-    {
-      "epoch": 0.052144440099074436,
-      "grad_norm": NaN,
-      "learning_rate": 0.0,
-      "loss": 0.0,
-      "step": 50
-    },
-    {
-      "epoch": 0.052144440099074436,
-      "eval_loss": NaN,
-      "eval_runtime": 329.6408,
-      "eval_samples_per_second": 19.597,
-      "eval_steps_per_second": 2.451,
-      "step": 50
     }
   ],
-  "logging_steps": 10,
-  "max_steps": 50,
+  "logging_steps": 5,
+  "max_steps": 1,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 13,
+  "save_steps": 1,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -109,8 +34,8 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.394108836872192e+17,
-  "train_batch_size": 8,
+  "total_flos": 174263604609024.0,
+  "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
 }
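The eval_loss values in this checkpoint's log are recorded as NaN. A quick sketch for inspecting the saved state from the checkpoint directory (Python's json module accepts the bare NaN tokens the Trainer writes):

# Sketch: read last-checkpoint/trainer_state.json and flag NaN eval losses.
import json
import math

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["max_steps"], state["train_batch_size"])
for entry in state["log_history"]:
    loss = entry.get("eval_loss")
    if loss is not None and math.isnan(loss):
        print(f"step {entry['step']}: eval_loss is NaN")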
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d9a9dea8f331f584c9b487728133ae2af0263e1b2a48a913b461eaa2ca9767b
+oid sha256:5f79eac3c3becad6b6342c9f355b4383f31ede640da5992da13e3b93ef21b914
 size 6776