rishdotblog committed
Commit 3b6585f
1 Parent(s): 70daa33

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 128000,
+ "eos_token_id": 128001,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 8192,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0",
+ "use_cache": true,
+ "vocab_size": 128256
+ }
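
The config above is the stock meta-llama/Meta-Llama-3-8B-Instruct architecture (32 layers, 32 attention heads with 8 KV heads, 128,256-token vocabulary, bf16 weights). A minimal loading sketch in Python, assuming the shards from this commit sit in a local directory (or repo id) called "sqlcoder_8b" and that tokenizer files, which are not part of this commit, are available alongside them:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# "sqlcoder_8b" is a placeholder path; point it at wherever this folder lives.
model = AutoModelForCausalLM.from_pretrained(
    "sqlcoder_8b",
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("sqlcoder_8b")
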
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "bos_token_id": 128000,
+ "do_sample": true,
+ "eos_token_id": [
+ 128001,
+ 128009
+ ],
+ "max_length": 4096,
+ "temperature": 0.6,
+ "top_p": 0.9,
+ "transformers_version": "4.40.0"
+ }
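
These are the sampling defaults that transformers picks up automatically from generation_config.json: nucleus sampling with temperature 0.6 and top_p 0.9, up to 4096 tokens, stopping on either end-of-sequence id (128001, or 128009, the Llama-3 <|eot_id|> token). A sketch of an equivalent explicit call, reusing the model and tokenizer loaded above (the prompt is illustrative):

prompt = "Write a SQL query that counts users per country."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# With no overrides, model.generate(**inputs) already applies these defaults;
# spelled out explicitly they are:
output = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
    max_length=4096,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))
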
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cd4f0ced6eaaa81ced74d2adc1e1926a33b9baaa11baacfb4f813d3e8c4301a
+ size 4976698672
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44aca46d6e6a471e919a511ce1cd1d0d3dcf5cc4828f6a4b2a433dbcd54852aa
+ size 4999802720
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0f2c83c62767ceaa9c46884ad08df1dca13d266dd2e8e6523fd25c5d9b1ed30
+ size 4915916176
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79925407cc9b3988886d6011c475b5fd3fcf1c064542da277d1a04a3f7722445
+ size 1168138808
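
Each of the four shard entries above is a Git LFS pointer: the repository tracks only the spec version, the sha256 object id, and the byte size, while the roughly 16 GB of weights live in LFS storage. A small sketch for verifying that a downloaded shard matches its pointer (file name and digest taken from this commit, path assumed local):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the ~5 GB shards are never read into memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

expected = "2cd4f0ced6eaaa81ced74d2adc1e1926a33b9baaa11baacfb4f813d3e8c4301a"
assert sha256_of("model-00001-of-00004.safetensors") == expected
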
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16060522496
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
26
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
27
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
28
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
29
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
30
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
31
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
91
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
92
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
97
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
98
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
100
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
101
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
102
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
103
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
104
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
105
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
106
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
107
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
108
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
109
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
110
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
112
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
113
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
114
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
115
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
116
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
117
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
118
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
119
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
120
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
121
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
122
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
123
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
124
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
125
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
126
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
127
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
128
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
130
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
131
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
132
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
133
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
134
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
135
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
136
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
137
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
138
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
139
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
140
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
145
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
146
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
147
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
148
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
149
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
150
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
151
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
152
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
153
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
154
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
155
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
156
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
157
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
158
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
159
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
160
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
161
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
162
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
163
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
164
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
165
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
169
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
170
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
171
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
172
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
173
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
174
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
180
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
181
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
182
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
183
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
184
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
185
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
186
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
187
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
193
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
194
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
196
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
197
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
198
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
199
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
200
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
206
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
207
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
208
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
209
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
210
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
211
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
216
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
217
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
218
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
219
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
220
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
221
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
222
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
223
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
224
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
225
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
229
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
230
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
232
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
233
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
234
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
235
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
236
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
237
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
238
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
239
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
241
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
242
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
243
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
244
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
245
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
246
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
247
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
248
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
249
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
250
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
251
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
252
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
253
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
254
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
255
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
256
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
257
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
258
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
259
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
260
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
261
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
262
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
263
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
264
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
265
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
266
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
267
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
268
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
269
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
270
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
271
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
272
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
273
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
274
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
275
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
276
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
277
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
278
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
279
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
280
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
281
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
282
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
283
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
284
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
285
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
286
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
287
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
288
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
289
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
290
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
291
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
292
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
293
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
294
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
295
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
296
+ "model.norm.weight": "model-00004-of-00004.safetensors"
297
+ }
298
+ }
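
model.safetensors.index.json is the shard index: total_size is the byte count across all four shards (about 16.06 GB), and weight_map routes every tensor name to the shard that stores it, which is how from_pretrained knows which file to open for which layer. A sketch of resolving a single tensor by hand with the safetensors library (local paths assumed):

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.20.mlp.gate_proj.weight"
shard = index["weight_map"][name]   # "model-00002-of-00004.safetensors" per the map above
with safe_open(shard, framework="pt") as shard_file:
    tensor = shard_file.get_tensor(name)
print(shard, tuple(tensor.shape))
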
trainer_state.json ADDED
@@ -0,0 +1,934 @@
1
+ {
2
+ "best_metric": 0.82,
3
+ "best_model_checkpoint": "/workspace/finetuning/models/sqlcoder_8b_fullft_ds_001_600_mgn10/checkpoint-500",
4
+ "epoch": 0.9166666666666666,
5
+ "eval_steps": 50,
6
+ "global_step": 550,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.008333333333333333,
13
+ "grad_norm": 8.25,
14
+ "learning_rate": 9.99830375862901e-06,
15
+ "loss": 0.6079,
16
+ "step": 5
17
+ },
18
+ {
19
+ "epoch": 0.016666666666666666,
20
+ "grad_norm": 6.0,
21
+ "learning_rate": 9.99321619703514e-06,
22
+ "loss": 0.3417,
23
+ "step": 10
24
+ },
25
+ {
26
+ "epoch": 0.025,
27
+ "grad_norm": 5.5,
28
+ "learning_rate": 9.984740801978986e-06,
29
+ "loss": 0.3039,
30
+ "step": 15
31
+ },
32
+ {
33
+ "epoch": 0.03333333333333333,
34
+ "grad_norm": 5.875,
35
+ "learning_rate": 9.972883382072955e-06,
36
+ "loss": 0.3035,
37
+ "step": 20
38
+ },
39
+ {
40
+ "epoch": 0.041666666666666664,
41
+ "grad_norm": 6.15625,
42
+ "learning_rate": 9.957652063800363e-06,
43
+ "loss": 0.2822,
44
+ "step": 25
45
+ },
46
+ {
47
+ "epoch": 0.05,
48
+ "grad_norm": 4.90625,
49
+ "learning_rate": 9.939057285945933e-06,
50
+ "loss": 0.2703,
51
+ "step": 30
52
+ },
53
+ {
54
+ "epoch": 0.058333333333333334,
55
+ "grad_norm": 5.96875,
56
+ "learning_rate": 9.917111792441576e-06,
57
+ "loss": 0.2598,
58
+ "step": 35
59
+ },
60
+ {
61
+ "epoch": 0.06666666666666667,
62
+ "grad_norm": 5.1875,
63
+ "learning_rate": 9.891830623632339e-06,
64
+ "loss": 0.267,
65
+ "step": 40
66
+ },
67
+ {
68
+ "epoch": 0.075,
69
+ "grad_norm": 5.4375,
70
+ "learning_rate": 9.8632311059685e-06,
71
+ "loss": 0.2431,
72
+ "step": 45
73
+ },
74
+ {
75
+ "epoch": 0.08333333333333333,
76
+ "grad_norm": 5.21875,
77
+ "learning_rate": 9.831332840130889e-06,
78
+ "loss": 0.2533,
79
+ "step": 50
80
+ },
81
+ {
82
+ "epoch": 0.08333333333333333,
83
+ "eval_count_mismatch_i_diff_avg": 2.25,
84
+ "eval_first_index_mismatch_avg": 8.25,
85
+ "eval_loss": 0.15123993158340454,
86
+ "eval_mean_mismatch_i_diff_avg": 8.4375,
87
+ "eval_runtime": 1.4309,
88
+ "eval_samples_per_second": 2.795,
89
+ "eval_sql_exact_match_string": 0,
90
+ "eval_steps_per_second": 0.699,
91
+ "eval_tokens_match_avg": 0.9552213461413188,
92
+ "step": 50
93
+ },
94
+ {
95
+ "epoch": 0.09166666666666666,
96
+ "grad_norm": 5.03125,
97
+ "learning_rate": 9.796157687597557e-06,
98
+ "loss": 0.261,
99
+ "step": 55
100
+ },
101
+ {
102
+ "epoch": 0.1,
103
+ "grad_norm": 5.5,
104
+ "learning_rate": 9.757729755661012e-06,
105
+ "loss": 0.2309,
106
+ "step": 60
107
+ },
108
+ {
109
+ "epoch": 0.10833333333333334,
110
+ "grad_norm": 4.6875,
111
+ "learning_rate": 9.716075380906285e-06,
112
+ "loss": 0.2418,
113
+ "step": 65
114
+ },
115
+ {
116
+ "epoch": 0.11666666666666667,
117
+ "grad_norm": 4.84375,
118
+ "learning_rate": 9.671223111161151e-06,
119
+ "loss": 0.2142,
120
+ "step": 70
121
+ },
122
+ {
123
+ "epoch": 0.125,
124
+ "grad_norm": 5.625,
125
+ "learning_rate": 9.623203685930872e-06,
126
+ "loss": 0.23,
127
+ "step": 75
128
+ },
129
+ {
130
+ "epoch": 0.13333333333333333,
131
+ "grad_norm": 4.125,
132
+ "learning_rate": 9.572050015330875e-06,
133
+ "loss": 0.2205,
134
+ "step": 80
135
+ },
136
+ {
137
+ "epoch": 0.14166666666666666,
138
+ "grad_norm": 5.28125,
139
+ "learning_rate": 9.517797157531813e-06,
140
+ "loss": 0.2112,
141
+ "step": 85
142
+ },
143
+ {
144
+ "epoch": 0.15,
145
+ "grad_norm": 5.09375,
146
+ "learning_rate": 9.460482294732423e-06,
147
+ "loss": 0.206,
148
+ "step": 90
149
+ },
150
+ {
151
+ "epoch": 0.15833333333333333,
152
+ "grad_norm": 4.53125,
153
+ "learning_rate": 9.40014470767673e-06,
154
+ "loss": 0.2323,
155
+ "step": 95
156
+ },
157
+ {
158
+ "epoch": 0.16666666666666666,
159
+ "grad_norm": 5.25,
160
+ "learning_rate": 9.336825748732973e-06,
161
+ "loss": 0.2046,
162
+ "step": 100
163
+ },
164
+ {
165
+ "epoch": 0.16666666666666666,
166
+ "eval_count_mismatch_i_diff_avg": 1.5,
167
+ "eval_first_index_mismatch_avg": 19.25,
168
+ "eval_loss": 0.12254643440246582,
169
+ "eval_mean_mismatch_i_diff_avg": 19.4375,
170
+ "eval_runtime": 1.4476,
171
+ "eval_samples_per_second": 2.763,
172
+ "eval_sql_exact_match_string": 1,
173
+ "eval_steps_per_second": 0.691,
174
+ "eval_tokens_match_avg": 0.9684337464860721,
175
+ "step": 100
176
+ },
177
+ {
178
+ "epoch": 0.175,
179
+ "grad_norm": 4.5625,
180
+ "learning_rate": 9.270568813552757e-06,
181
+ "loss": 0.209,
182
+ "step": 105
183
+ },
184
+ {
185
+ "epoch": 0.18333333333333332,
186
+ "grad_norm": 5.5,
187
+ "learning_rate": 9.201419311329848e-06,
188
+ "loss": 0.2209,
189
+ "step": 110
190
+ },
191
+ {
192
+ "epoch": 0.19166666666666668,
193
+ "grad_norm": 5.25,
194
+ "learning_rate": 9.129424633678977e-06,
195
+ "loss": 0.1959,
196
+ "step": 115
197
+ },
198
+ {
199
+ "epoch": 0.2,
200
+ "grad_norm": 6.15625,
201
+ "learning_rate": 9.054634122155991e-06,
202
+ "loss": 0.19,
203
+ "step": 120
204
+ },
205
+ {
206
+ "epoch": 0.20833333333333334,
207
+ "grad_norm": 4.4375,
208
+ "learning_rate": 8.977099034441616e-06,
209
+ "loss": 0.2103,
210
+ "step": 125
211
+ },
212
+ {
213
+ "epoch": 0.21666666666666667,
214
+ "grad_norm": 6.03125,
215
+ "learning_rate": 8.896872509212006e-06,
216
+ "loss": 0.2035,
217
+ "step": 130
218
+ },
219
+ {
220
+ "epoch": 0.225,
221
+ "grad_norm": 5.53125,
222
+ "learning_rate": 8.814009529720153e-06,
223
+ "loss": 0.1997,
224
+ "step": 135
225
+ },
226
+ {
227
+ "epoch": 0.23333333333333334,
228
+ "grad_norm": 5.03125,
229
+ "learning_rate": 8.728566886113101e-06,
230
+ "loss": 0.198,
231
+ "step": 140
232
+ },
233
+ {
234
+ "epoch": 0.24166666666666667,
235
+ "grad_norm": 5.1875,
236
+ "learning_rate": 8.640603136510823e-06,
237
+ "loss": 0.184,
238
+ "step": 145
239
+ },
240
+ {
241
+ "epoch": 0.25,
242
+ "grad_norm": 5.34375,
243
+ "learning_rate": 8.55017856687341e-06,
244
+ "loss": 0.1826,
245
+ "step": 150
246
+ },
247
+ {
248
+ "epoch": 0.25,
249
+ "eval_count_mismatch_i_diff_avg": 1.75,
250
+ "eval_first_index_mismatch_avg": 9.25,
251
+ "eval_loss": 0.11601436883211136,
252
+ "eval_mean_mismatch_i_diff_avg": 9.583333333333334,
253
+ "eval_runtime": 1.4309,
254
+ "eval_samples_per_second": 2.795,
255
+ "eval_sql_exact_match_string": 0,
256
+ "eval_steps_per_second": 0.699,
257
+ "eval_tokens_match_avg": 0.9654995853439766,
258
+ "step": 150
259
+ },
260
+ {
261
+ "epoch": 0.25833333333333336,
262
+ "grad_norm": 4.53125,
263
+ "learning_rate": 8.45735514968408e-06,
264
+ "loss": 0.1722,
265
+ "step": 155
266
+ },
267
+ {
268
+ "epoch": 0.26666666666666666,
269
+ "grad_norm": 5.4375,
270
+ "learning_rate": 8.362196501476348e-06,
271
+ "loss": 0.1735,
272
+ "step": 160
273
+ },
274
+ {
275
+ "epoch": 0.275,
276
+ "grad_norm": 5.15625,
277
+ "learning_rate": 8.26476783923441e-06,
278
+ "loss": 0.178,
279
+ "step": 165
280
+ },
281
+ {
282
+ "epoch": 0.2833333333333333,
283
+ "grad_norm": 5.46875,
284
+ "learning_rate": 8.165135935696693e-06,
285
+ "loss": 0.1929,
286
+ "step": 170
287
+ },
288
+ {
289
+ "epoch": 0.2916666666666667,
290
+ "grad_norm": 4.3125,
291
+ "learning_rate": 8.063369073593167e-06,
292
+ "loss": 0.1815,
293
+ "step": 175
294
+ },
295
+ {
296
+ "epoch": 0.3,
297
+ "grad_norm": 5.21875,
298
+ "learning_rate": 7.959536998847742e-06,
299
+ "loss": 0.195,
300
+ "step": 180
301
+ },
302
+ {
303
+ "epoch": 0.30833333333333335,
304
+ "grad_norm": 4.5625,
305
+ "learning_rate": 7.853710872777922e-06,
306
+ "loss": 0.1765,
307
+ "step": 185
308
+ },
309
+ {
310
+ "epoch": 0.31666666666666665,
311
+ "grad_norm": 4.84375,
312
+ "learning_rate": 7.745963223324384e-06,
313
+ "loss": 0.1772,
314
+ "step": 190
315
+ },
316
+ {
317
+ "epoch": 0.325,
318
+ "grad_norm": 4.75,
319
+ "learning_rate": 7.636367895343949e-06,
320
+ "loss": 0.1641,
321
+ "step": 195
322
+ },
323
+ {
324
+ "epoch": 0.3333333333333333,
325
+ "grad_norm": 4.8125,
326
+ "learning_rate": 7.525000000000003e-06,
327
+ "loss": 0.1889,
328
+ "step": 200
329
+ },
330
+ {
331
+ "epoch": 0.3333333333333333,
332
+ "eval_count_mismatch_i_diff_avg": 1.5,
333
+ "eval_first_index_mismatch_avg": 9.25,
334
+ "eval_loss": 0.10480775684118271,
335
+ "eval_mean_mismatch_i_diff_avg": 9.083333333333334,
336
+ "eval_runtime": 1.4264,
337
+ "eval_samples_per_second": 2.804,
338
+ "eval_sql_exact_match_string": 0,
339
+ "eval_steps_per_second": 0.701,
340
+ "eval_tokens_match_avg": 0.9693457391901305,
341
+ "step": 200
342
+ },
343
+ {
344
+ "epoch": 0.3416666666666667,
345
+ "grad_norm": 4.9375,
346
+ "learning_rate": 7.411935863285065e-06,
347
+ "loss": 0.1786,
348
+ "step": 205
349
+ },
350
+ {
351
+ "epoch": 0.35,
352
+ "grad_norm": 4.1875,
353
+ "learning_rate": 7.297252973710758e-06,
354
+ "loss": 0.1706,
355
+ "step": 210
356
+ },
357
+ {
358
+ "epoch": 0.35833333333333334,
359
+ "grad_norm": 4.75,
360
+ "learning_rate": 7.181029929201061e-06,
361
+ "loss": 0.1916,
362
+ "step": 215
363
+ },
364
+ {
365
+ "epoch": 0.36666666666666664,
366
+ "grad_norm": 5.53125,
367
+ "learning_rate": 7.063346383225212e-06,
368
+ "loss": 0.1701,
369
+ "step": 220
370
+ },
371
+ {
372
+ "epoch": 0.375,
373
+ "grad_norm": 4.75,
374
+ "learning_rate": 6.944282990207196e-06,
375
+ "loss": 0.1628,
376
+ "step": 225
377
+ },
378
+ {
379
+ "epoch": 0.38333333333333336,
380
+ "grad_norm": 5.3125,
381
+ "learning_rate": 6.823921350249237e-06,
382
+ "loss": 0.1801,
383
+ "step": 230
384
+ },
385
+ {
386
+ "epoch": 0.39166666666666666,
387
+ "grad_norm": 4.6875,
388
+ "learning_rate": 6.702343953207167e-06,
389
+ "loss": 0.1492,
390
+ "step": 235
391
+ },
392
+ {
393
+ "epoch": 0.4,
394
+ "grad_norm": 4.625,
395
+ "learning_rate": 6.579634122155992e-06,
396
+ "loss": 0.1608,
397
+ "step": 240
398
+ },
399
+ {
400
+ "epoch": 0.4083333333333333,
401
+ "grad_norm": 4.59375,
402
+ "learning_rate": 6.455875956284421e-06,
403
+ "loss": 0.1515,
404
+ "step": 245
405
+ },
406
+ {
407
+ "epoch": 0.4166666666666667,
408
+ "grad_norm": 4.59375,
409
+ "learning_rate": 6.331154273257482e-06,
410
+ "loss": 0.1597,
411
+ "step": 250
412
+ },
413
+ {
414
+ "epoch": 0.4166666666666667,
415
+ "eval_count_mismatch_i_diff_avg": 1.75,
416
+ "eval_first_index_mismatch_avg": 21.0,
417
+ "eval_loss": 0.11433370411396027,
418
+ "eval_mean_mismatch_i_diff_avg": 19.583333333333336,
419
+ "eval_runtime": 1.4735,
420
+ "eval_samples_per_second": 2.715,
421
+ "eval_sql_exact_match_string": 1,
422
+ "eval_steps_per_second": 0.679,
423
+ "eval_tokens_match_avg": 0.9659372604140046,
424
+ "step": 250
425
+ },
426
+ {
427
+ "epoch": 0.425,
428
+ "grad_norm": 3.890625,
429
+ "learning_rate": 6.205554551086736e-06,
430
+ "loss": 0.1339,
431
+ "step": 255
432
+ },
433
+ {
434
+ "epoch": 0.43333333333333335,
435
+ "grad_norm": 5.25,
436
+ "learning_rate": 6.079162869547913e-06,
437
+ "loss": 0.17,
438
+ "step": 260
439
+ },
440
+ {
441
+ "epoch": 0.44166666666666665,
442
+ "grad_norm": 5.28125,
443
+ "learning_rate": 5.952065851186132e-06,
444
+ "loss": 0.1598,
445
+ "step": 265
446
+ },
447
+ {
448
+ "epoch": 0.45,
449
+ "grad_norm": 4.5,
450
+ "learning_rate": 5.824350601949145e-06,
451
+ "loss": 0.1545,
452
+ "step": 270
453
+ },
454
+ {
455
+ "epoch": 0.4583333333333333,
456
+ "grad_norm": 6.71875,
457
+ "learning_rate": 5.696104651489257e-06,
458
+ "loss": 0.1552,
459
+ "step": 275
460
+ },
461
+ {
462
+ "epoch": 0.4666666666666667,
463
+ "grad_norm": 4.875,
464
+ "learning_rate": 5.567415893174889e-06,
465
+ "loss": 0.1642,
466
+ "step": 280
467
+ },
468
+ {
469
+ "epoch": 0.475,
470
+ "grad_norm": 4.25,
471
+ "learning_rate": 5.438372523852835e-06,
472
+ "loss": 0.1581,
473
+ "step": 285
474
+ },
475
+ {
476
+ "epoch": 0.48333333333333334,
477
+ "grad_norm": 5.03125,
478
+ "learning_rate": 5.309062983402575e-06,
479
+ "loss": 0.1632,
480
+ "step": 290
481
+ },
482
+ {
483
+ "epoch": 0.49166666666666664,
484
+ "grad_norm": 4.59375,
485
+ "learning_rate": 5.179575894123973e-06,
486
+ "loss": 0.1493,
487
+ "step": 295
488
+ },
489
+ {
490
+ "epoch": 0.5,
491
+ "grad_norm": 5.125,
492
+ "learning_rate": 5.050000000000001e-06,
493
+ "loss": 0.149,
494
+ "step": 300
495
+ },
496
+ {
497
+ "epoch": 0.5,
498
+ "eval_count_mismatch_i_diff_avg": 2.0,
499
+ "eval_first_index_mismatch_avg": 20.0,
500
+ "eval_loss": 0.11815375089645386,
501
+ "eval_mean_mismatch_i_diff_avg": 18.9375,
502
+ "eval_runtime": 1.3387,
503
+ "eval_samples_per_second": 2.988,
504
+ "eval_sql_exact_match_string": 1,
505
+ "eval_steps_per_second": 0.747,
506
+ "eval_tokens_match_avg": 0.9601233069256325,
507
+ "step": 300
508
+ },
509
+ {
510
+ "epoch": 0.5083333333333333,
511
+ "grad_norm": 4.21875,
512
+ "learning_rate": 4.9204241058760295e-06,
513
+ "loss": 0.155,
514
+ "step": 305
515
+ },
516
+ {
517
+ "epoch": 0.5166666666666667,
518
+ "grad_norm": 4.28125,
519
+ "learning_rate": 4.79093701659743e-06,
520
+ "loss": 0.1646,
521
+ "step": 310
522
+ },
523
+ {
524
+ "epoch": 0.525,
525
+ "grad_norm": 4.65625,
526
+ "learning_rate": 4.661627476147171e-06,
527
+ "loss": 0.1413,
528
+ "step": 315
529
+ },
530
+ {
531
+ "epoch": 0.5333333333333333,
532
+ "grad_norm": 5.03125,
533
+ "learning_rate": 4.5325841068251195e-06,
534
+ "loss": 0.1476,
535
+ "step": 320
536
+ },
537
+ {
538
+ "epoch": 0.5416666666666666,
539
+ "grad_norm": 4.75,
540
+ "learning_rate": 4.403895348510749e-06,
541
+ "loss": 0.147,
542
+ "step": 325
543
+ },
544
+ {
545
+ "epoch": 0.55,
546
+ "grad_norm": 4.875,
547
+ "learning_rate": 4.27564939805086e-06,
548
+ "loss": 0.1412,
549
+ "step": 330
550
+ },
551
+ {
552
+ "epoch": 0.5583333333333333,
553
+ "grad_norm": 4.65625,
554
+ "learning_rate": 4.1479341488138745e-06,
555
+ "loss": 0.1325,
556
+ "step": 335
557
+ },
558
+ {
559
+ "epoch": 0.5666666666666667,
560
+ "grad_norm": 4.9375,
561
+ "learning_rate": 4.020837130452094e-06,
562
+ "loss": 0.1462,
563
+ "step": 340
564
+ },
565
+ {
566
+ "epoch": 0.575,
567
+ "grad_norm": 5.28125,
568
+ "learning_rate": 3.894445448913271e-06,
569
+ "loss": 0.1454,
570
+ "step": 345
571
+ },
572
+ {
573
+ "epoch": 0.5833333333333334,
574
+ "grad_norm": 4.59375,
575
+ "learning_rate": 3.7688457267425276e-06,
576
+ "loss": 0.1476,
577
+ "step": 350
578
+ },
579
+ {
580
+ "epoch": 0.5833333333333334,
581
+ "eval_count_mismatch_i_diff_avg": 2.25,
582
+ "eval_first_index_mismatch_avg": 8.25,
583
+ "eval_loss": 0.11698409914970398,
584
+ "eval_mean_mismatch_i_diff_avg": 10.4375,
585
+ "eval_runtime": 1.2608,
586
+ "eval_samples_per_second": 3.173,
587
+ "eval_sql_exact_match_string": 0,
588
+ "eval_steps_per_second": 0.793,
589
+ "eval_tokens_match_avg": 0.9547836710712908,
590
+ "step": 350
591
+ },
592
+ {
593
+ "epoch": 0.5916666666666667,
594
+ "grad_norm": 4.875,
595
+ "learning_rate": 3.6441240437155883e-06,
596
+ "loss": 0.1504,
597
+ "step": 355
598
+ },
599
+ {
600
+ "epoch": 0.6,
601
+ "grad_norm": 4.84375,
602
+ "learning_rate": 3.5203658778440145e-06,
603
+ "loss": 0.1616,
604
+ "step": 360
605
+ },
606
+ {
607
+ "epoch": 0.6083333333333333,
608
+ "grad_norm": 4.40625,
609
+ "learning_rate": 3.397656046792837e-06,
610
+ "loss": 0.16,
611
+ "step": 365
612
+ },
613
+ {
614
+ "epoch": 0.6166666666666667,
615
+ "grad_norm": 4.4375,
616
+ "learning_rate": 3.2760786497507687e-06,
617
+ "loss": 0.1393,
618
+ "step": 370
619
+ },
620
+ {
621
+ "epoch": 0.625,
622
+ "grad_norm": 5.0,
623
+ "learning_rate": 3.155717009792809e-06,
624
+ "loss": 0.1526,
625
+ "step": 375
626
+ },
627
+ {
628
+ "epoch": 0.6333333333333333,
629
+ "grad_norm": 6.15625,
630
+ "learning_rate": 3.036653616774792e-06,
631
+ "loss": 0.1619,
632
+ "step": 380
633
+ },
634
+ {
635
+ "epoch": 0.6416666666666667,
636
+ "grad_norm": 4.75,
637
+ "learning_rate": 2.9189700707989414e-06,
638
+ "loss": 0.1428,
639
+ "step": 385
640
+ },
641
+ {
642
+ "epoch": 0.65,
643
+ "grad_norm": 5.46875,
644
+ "learning_rate": 2.8027470262892473e-06,
645
+ "loss": 0.1377,
646
+ "step": 390
647
+ },
648
+ {
649
+ "epoch": 0.6583333333333333,
650
+ "grad_norm": 4.3125,
651
+ "learning_rate": 2.688064136714942e-06,
652
+ "loss": 0.1382,
653
+ "step": 395
654
+ },
655
+ {
656
+ "epoch": 0.6666666666666666,
657
+ "grad_norm": 5.09375,
658
+ "learning_rate": 2.575000000000003e-06,
659
+ "loss": 0.1465,
660
+ "step": 400
661
+ },
662
+ {
663
+ "epoch": 0.6666666666666666,
664
+ "eval_count_mismatch_i_diff_avg": 2.25,
665
+ "eval_first_index_mismatch_avg": 10.75,
666
+ "eval_loss": 0.11492403596639633,
667
+ "eval_mean_mismatch_i_diff_avg": 9.6875,
668
+ "eval_runtime": 1.4356,
669
+ "eval_samples_per_second": 2.786,
670
+ "eval_sql_exact_match_string": 0,
671
+ "eval_steps_per_second": 0.697,
672
+ "eval_tokens_match_avg": 0.9552213461413188,
673
+ "step": 400
674
+ },
675
+ {
676
+ "epoch": 0.675,
677
+ "grad_norm": 4.90625,
678
+ "learning_rate": 2.4636321046560575e-06,
679
+ "loss": 0.146,
680
+ "step": 405
681
+ },
682
+ {
683
+ "epoch": 0.6833333333333333,
684
+ "grad_norm": 4.78125,
685
+ "learning_rate": 2.3540367766756217e-06,
686
+ "loss": 0.1496,
687
+ "step": 410
688
+ },
689
+ {
690
+ "epoch": 0.6916666666666667,
691
+ "grad_norm": 4.75,
692
+ "learning_rate": 2.2462891272220828e-06,
693
+ "loss": 0.1534,
694
+ "step": 415
695
+ },
696
+ {
697
+ "epoch": 0.7,
698
+ "grad_norm": 5.4375,
699
+ "learning_rate": 2.140463001152263e-06,
700
+ "loss": 0.139,
701
+ "step": 420
702
+ },
703
+ {
704
+ "epoch": 0.7083333333333334,
705
+ "grad_norm": 4.71875,
706
+ "learning_rate": 2.0366309264068387e-06,
707
+ "loss": 0.1495,
708
+ "step": 425
709
+ },
710
+ {
711
+ "epoch": 0.7166666666666667,
712
+ "grad_norm": 5.15625,
713
+ "learning_rate": 1.9348640643033098e-06,
714
+ "loss": 0.1434,
715
+ "step": 430
716
+ },
717
+ {
718
+ "epoch": 0.725,
719
+ "grad_norm": 4.4375,
720
+ "learning_rate": 1.8352321607655958e-06,
721
+ "loss": 0.1494,
722
+ "step": 435
723
+ },
724
+ {
725
+ "epoch": 0.7333333333333333,
726
+ "grad_norm": 4.96875,
727
+ "learning_rate": 1.7378034985236564e-06,
728
+ "loss": 0.155,
729
+ "step": 440
730
+ },
731
+ {
732
+ "epoch": 0.7416666666666667,
733
+ "grad_norm": 4.6875,
734
+ "learning_rate": 1.6426448503159242e-06,
735
+ "loss": 0.1418,
736
+ "step": 445
737
+ },
738
+ {
739
+ "epoch": 0.75,
740
+ "grad_norm": 5.0,
741
+ "learning_rate": 1.5498214331265955e-06,
742
+ "loss": 0.1542,
743
+ "step": 450
744
+ },
745
+ {
746
+ "epoch": 0.75,
747
+ "eval_count_mismatch_i_diff_avg": 2.0,
748
+ "eval_first_index_mismatch_avg": 10.75,
749
+ "eval_loss": 0.11669360101222992,
750
+ "eval_mean_mismatch_i_diff_avg": 9.6875,
751
+ "eval_runtime": 1.4248,
752
+ "eval_samples_per_second": 2.807,
753
+ "eval_sql_exact_match_string": 0,
754
+ "eval_steps_per_second": 0.702,
755
+ "eval_tokens_match_avg": 0.9590674999874726,
756
+ "step": 450
757
+ },
758
+ {
759
+ "epoch": 0.7583333333333333,
760
+ "grad_norm": 5.0,
761
+ "learning_rate": 1.4593968634891815e-06,
762
+ "loss": 0.1502,
763
+ "step": 455
764
+ },
765
+ {
766
+ "epoch": 0.7666666666666667,
767
+ "grad_norm": 5.96875,
768
+ "learning_rate": 1.371433113886903e-06,
769
+ "loss": 0.1481,
770
+ "step": 460
771
+ },
772
+ {
773
+ "epoch": 0.775,
774
+ "grad_norm": 4.75,
775
+ "learning_rate": 1.285990470279852e-06,
776
+ "loss": 0.1467,
777
+ "step": 465
778
+ },
779
+ {
780
+ "epoch": 0.7833333333333333,
781
+ "grad_norm": 4.5,
782
+ "learning_rate": 1.2031274907879994e-06,
783
+ "loss": 0.1242,
784
+ "step": 470
785
+ },
786
+ {
787
+ "epoch": 0.7916666666666666,
788
+ "grad_norm": 3.75,
789
+ "learning_rate": 1.122900965558391e-06,
790
+ "loss": 0.14,
791
+ "step": 475
792
+ },
793
+ {
794
+ "epoch": 0.8,
795
+ "grad_norm": 6.34375,
796
+ "learning_rate": 1.0453658778440162e-06,
797
+ "loss": 0.1634,
798
+ "step": 480
799
+ },
800
+ {
801
+ "epoch": 0.8083333333333333,
802
+ "grad_norm": 4.125,
803
+ "learning_rate": 9.705753663210283e-07,
804
+ "loss": 0.139,
805
+ "step": 485
806
+ },
807
+ {
808
+ "epoch": 0.8166666666666667,
809
+ "grad_norm": 5.15625,
810
+ "learning_rate": 8.98580688670156e-07,
811
+ "loss": 0.1546,
812
+ "step": 490
813
+ },
814
+ {
815
+ "epoch": 0.825,
816
+ "grad_norm": 5.46875,
817
+ "learning_rate": 8.294311864472477e-07,
818
+ "loss": 0.1471,
819
+ "step": 495
820
+ },
821
+ {
822
+ "epoch": 0.8333333333333334,
823
+ "grad_norm": 5.09375,
824
+ "learning_rate": 7.631742512670332e-07,
825
+ "loss": 0.1442,
826
+ "step": 500
827
+ },
828
+ {
829
+ "epoch": 0.8333333333333334,
830
+ "eval_count_mismatch_i_diff_avg": 2.25,
831
+ "eval_first_index_mismatch_avg": 10.75,
832
+ "eval_loss": 0.11715836822986603,
833
+ "eval_mean_mismatch_i_diff_avg": 9.6875,
834
+ "eval_runtime": 1.4332,
835
+ "eval_samples_per_second": 2.791,
836
+ "eval_sql_exact_match_string": 0,
837
+ "eval_steps_per_second": 0.698,
838
+ "eval_tokens_match_avg": 0.9552213461413188,
839
+ "step": 500
840
+ },
841
+ {
842
+ "epoch": 0.8416666666666667,
843
+ "grad_norm": 4.78125,
844
+ "learning_rate": 6.998552923232753e-07,
845
+ "loss": 0.1397,
846
+ "step": 505
847
+ },
848
+ {
849
+ "epoch": 0.85,
850
+ "grad_norm": 4.75,
851
+ "learning_rate": 6.395177052675824e-07,
852
+ "loss": 0.1494,
853
+ "step": 510
854
+ },
855
+ {
856
+ "epoch": 0.8583333333333333,
857
+ "grad_norm": 4.375,
858
+ "learning_rate": 5.822028424681925e-07,
859
+ "loss": 0.1493,
860
+ "step": 515
861
+ },
862
+ {
863
+ "epoch": 0.8666666666666667,
864
+ "grad_norm": 5.125,
865
+ "learning_rate": 5.279499846691286e-07,
866
+ "loss": 0.132,
867
+ "step": 520
868
+ },
869
+ {
870
+ "epoch": 0.875,
871
+ "grad_norm": 5.3125,
872
+ "learning_rate": 4.767963140691326e-07,
873
+ "loss": 0.1523,
874
+ "step": 525
875
+ },
876
+ {
877
+ "epoch": 0.8833333333333333,
878
+ "grad_norm": 4.375,
879
+ "learning_rate": 4.287768888388531e-07,
880
+ "loss": 0.1229,
881
+ "step": 530
882
+ },
883
+ {
884
+ "epoch": 0.8916666666666667,
885
+ "grad_norm": 4.59375,
886
+ "learning_rate": 3.839246190937183e-07,
887
+ "loss": 0.1671,
888
+ "step": 535
889
+ },
890
+ {
891
+ "epoch": 0.9,
892
+ "grad_norm": 6.25,
893
+ "learning_rate": 3.4227024433899137e-07,
894
+ "loss": 0.1304,
895
+ "step": 540
896
+ },
897
+ {
898
+ "epoch": 0.9083333333333333,
899
+ "grad_norm": 5.34375,
900
+ "learning_rate": 3.0384231240244555e-07,
901
+ "loss": 0.1528,
902
+ "step": 545
903
+ },
904
+ {
905
+ "epoch": 0.9166666666666666,
906
+ "grad_norm": 4.125,
907
+ "learning_rate": 2.686671598691128e-07,
908
+ "loss": 0.1556,
909
+ "step": 550
910
+ },
911
+ {
912
+ "epoch": 0.9166666666666666,
913
+ "eval_count_mismatch_i_diff_avg": 2.25,
914
+ "eval_first_index_mismatch_avg": 10.75,
915
+ "eval_loss": 0.11767315864562988,
916
+ "eval_mean_mismatch_i_diff_avg": 9.6875,
917
+ "eval_runtime": 1.4395,
918
+ "eval_samples_per_second": 2.779,
919
+ "eval_sql_exact_match_string": 0,
920
+ "eval_steps_per_second": 0.695,
921
+ "eval_tokens_match_avg": 0.9552213461413188,
922
+ "step": 550
923
+ }
924
+ ],
925
+ "logging_steps": 5,
926
+ "max_steps": 600,
927
+ "num_input_tokens_seen": 0,
928
+ "num_train_epochs": 1,
929
+ "save_steps": 50,
930
+ "total_flos": 5.255573265461084e+17,
931
+ "train_batch_size": 2,
932
+ "trial_name": null,
933
+ "trial_params": null
934
+ }
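
trainer_state.json is the Hugging Face Trainer log for this run: a training-loss entry every 5 steps and an eval block every 50 steps, with the best checkpoint (best_metric 0.82) saved at step 500 of a 600-step schedule. A sketch for pulling the curves out of log_history:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("last train loss:", train[-1])      # (550, 0.1556)
print("last eval loss:", evals[-1])       # (550, 0.1176...)
print("best checkpoint:", state["best_model_checkpoint"])
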
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5b830ad96e6ef339827c8f2f199064d80617e6d5143ed38868d24ee5802a509
3
+ size 5112
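
training_args.bin is a pickled TrainingArguments object rather than a tensor file, so it is read with torch.load and needs a compatible transformers install to unpickle; because it is an arbitrary pickle, only load it from sources you trust. A minimal sketch:

import torch

# weights_only=False is needed on recent torch versions, since this is a plain
# pickle of a transformers TrainingArguments object, not a tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
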