diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..6a7421c0f2984115eba1131cd54efddf75e131ee 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoint-1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-2000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-2500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-2997/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/checkpoint-1000/config.json b/checkpoint-1000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..06ec1df58f28234ccce22a5325e108ece94f0078
--- /dev/null
+++ b/checkpoint-1000/config.json
@@ -0,0 +1,34 @@
+{
+ "_name_or_path": "facebook/nllb-200-3.3B",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "M2M100ForConditionalGeneration"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 0,
+ "d_model": 2048,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 8192,
+ "decoder_layerdrop": 0,
+ "decoder_layers": 24,
+ "decoder_start_token_id": 2,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 8192,
+ "encoder_layerdrop": 0,
+ "encoder_layers": 24,
+ "eos_token_id": 2,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 200,
+ "max_position_embeddings": 1024,
+ "model_type": "m2m_100",
+ "num_hidden_layers": 24,
+ "pad_token_id": 1,
+ "scale_embedding": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.43.2",
+ "use_cache": true,
+ "vocab_size": 256206
+}
diff --git a/checkpoint-1000/generation_config.json b/checkpoint-1000/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..402a1a43d1af8c080466b8139184b4e5b7f3f47c
--- /dev/null
+++ b/checkpoint-1000/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "decoder_start_token_id": 2,
+ "eos_token_id": 2,
+ "max_length": 200,
+ "pad_token_id": 1,
+ "transformers_version": "4.43.2"
+}
diff --git a/checkpoint-1000/model-00001-of-00003.safetensors b/checkpoint-1000/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..13aa5b878c7c2d7a1a2671b1e1df782d6c5f2b10
--- /dev/null
+++ b/checkpoint-1000/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1ade5256c1994ceaea57b36487da445ff5fa3bd970757a711a206a17f987862
+size 4986088344
diff --git a/checkpoint-1000/model-00002-of-00003.safetensors b/checkpoint-1000/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..eb10e3b03932e2c95757c70ca5e92d21f1abde78
--- /dev/null
+++ b/checkpoint-1000/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdb1e5e8869c7c036006dfd2113a779bea5437d10e79f89b5a6d55131d7837c7
+size 4985688360
diff --git a/checkpoint-1000/model-00003-of-00003.safetensors b/checkpoint-1000/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..30822d8db461664831b7c7b954de2c490305efa4
--- /dev/null
+++ b/checkpoint-1000/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7054eff5c598fbbfc10deda92eb600ff34fa89c1f5e7fbd0cefbd5fde9f96d7e
+size 3407796744
diff --git a/checkpoint-1000/model.safetensors.index.json b/checkpoint-1000/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..66f5db79b23230955de24502c00adc6525edbdfc
--- /dev/null
+++ b/checkpoint-1000/model.safetensors.index.json
@@ -0,0 +1,1020 @@
+{
+ "metadata": {
+ "total_size": 13379452928
+ },
+ "weight_map": {
+ "model.decoder.layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.shared.weight": "model-00001-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-1000/optimizer.pt b/checkpoint-1000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bed847b55cdc82d5b64f4b14e84f533eb639d844
--- /dev/null
+++ b/checkpoint-1000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d2238db343edcba72b8d16e24a2cbb9b4319dabd324b5461d1bcb717ff5ce8a
+size 16695613
diff --git a/checkpoint-1000/rng_state.pth b/checkpoint-1000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..45a039a7286e17c1443f8fdfb5e920eb8580fb14
--- /dev/null
+++ b/checkpoint-1000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f998b81a7c9ec9576c1eb3934948ddc2e93559dcb8816f2c74465b567d39eeae
+size 14244
diff --git a/checkpoint-1000/scheduler.pt b/checkpoint-1000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9821fb45b0a1df60214edcf1899282bb8e5baf49
--- /dev/null
+++ b/checkpoint-1000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:570e13a31cf6d8596cc77b49b9cd5f0dba53d08fce525ea305a66da574a18d47
+size 1064
diff --git a/checkpoint-1000/sentencepiece.bpe.model b/checkpoint-1000/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..dc2262d3e1d375b235eb71c24119c8e73f85d4ad
--- /dev/null
+++ b/checkpoint-1000/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
+size 4852054
diff --git a/checkpoint-1000/special_tokens_map.json b/checkpoint-1000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..770c6f4e25faf27bbc3878b806f2ecfb88c5169e
--- /dev/null
+++ b/checkpoint-1000/special_tokens_map.json
@@ -0,0 +1,255 @@
+{
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1000/tokenizer.json b/checkpoint-1000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..98050e98b98364c06d83b3f41864076220cb8408
--- /dev/null
+++ b/checkpoint-1000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b39b25b0763a1dd69dec54081fafcf10770d9f2538a3bd975a0c4be6d60a9c2
+size 17331294
diff --git a/checkpoint-1000/tokenizer_config.json b/checkpoint-1000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f1424d3657c008568198b44be241646482e7e9f2
--- /dev/null
+++ b/checkpoint-1000/tokenizer_config.json
@@ -0,0 +1,1878 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256001": {
+ "content": "ace_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256002": {
+ "content": "ace_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256003": {
+ "content": "acm_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256004": {
+ "content": "acq_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256005": {
+ "content": "aeb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256006": {
+ "content": "afr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256007": {
+ "content": "ajp_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256008": {
+ "content": "aka_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256009": {
+ "content": "amh_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256010": {
+ "content": "apc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256011": {
+ "content": "arb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256012": {
+ "content": "ars_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256013": {
+ "content": "ary_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256014": {
+ "content": "arz_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256015": {
+ "content": "asm_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256016": {
+ "content": "ast_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256017": {
+ "content": "awa_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256018": {
+ "content": "ayr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256019": {
+ "content": "azb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256020": {
+ "content": "azj_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256021": {
+ "content": "bak_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256022": {
+ "content": "bam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256023": {
+ "content": "ban_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256024": {
+ "content": "bel_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256025": {
+ "content": "bem_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256026": {
+ "content": "ben_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256027": {
+ "content": "bho_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256028": {
+ "content": "bjn_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256029": {
+ "content": "bjn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256030": {
+ "content": "bod_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256031": {
+ "content": "bos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256032": {
+ "content": "bug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256033": {
+ "content": "bul_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256034": {
+ "content": "cat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256035": {
+ "content": "ceb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256036": {
+ "content": "ces_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256037": {
+ "content": "cjk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256038": {
+ "content": "ckb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256039": {
+ "content": "crh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256040": {
+ "content": "cym_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256041": {
+ "content": "dan_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256042": {
+ "content": "deu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256043": {
+ "content": "dik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256044": {
+ "content": "dyu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256045": {
+ "content": "dzo_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256046": {
+ "content": "ell_Grek",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256047": {
+ "content": "eng_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256048": {
+ "content": "epo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256049": {
+ "content": "est_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256050": {
+ "content": "eus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256051": {
+ "content": "ewe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256052": {
+ "content": "fao_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256053": {
+ "content": "pes_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256054": {
+ "content": "fij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256055": {
+ "content": "fin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256056": {
+ "content": "fon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256057": {
+ "content": "fra_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256058": {
+ "content": "fur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256059": {
+ "content": "fuv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256060": {
+ "content": "gla_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256061": {
+ "content": "gle_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256062": {
+ "content": "glg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256063": {
+ "content": "grn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256064": {
+ "content": "guj_Gujr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256065": {
+ "content": "hat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256066": {
+ "content": "hau_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256067": {
+ "content": "heb_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256068": {
+ "content": "hin_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256069": {
+ "content": "hne_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256070": {
+ "content": "hrv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256071": {
+ "content": "hun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256072": {
+ "content": "hye_Armn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256073": {
+ "content": "ibo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256074": {
+ "content": "ilo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256075": {
+ "content": "ind_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256076": {
+ "content": "isl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256077": {
+ "content": "ita_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256078": {
+ "content": "jav_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256079": {
+ "content": "jpn_Jpan",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256080": {
+ "content": "kab_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256081": {
+ "content": "kac_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256082": {
+ "content": "kam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256083": {
+ "content": "kan_Knda",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256084": {
+ "content": "kas_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256085": {
+ "content": "kas_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256086": {
+ "content": "kat_Geor",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256087": {
+ "content": "knc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256088": {
+ "content": "knc_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256089": {
+ "content": "kaz_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256090": {
+ "content": "kbp_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256091": {
+ "content": "kea_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256092": {
+ "content": "khm_Khmr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256093": {
+ "content": "kik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256094": {
+ "content": "kin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256095": {
+ "content": "kir_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256096": {
+ "content": "kmb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256097": {
+ "content": "kon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256098": {
+ "content": "kor_Hang",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256099": {
+ "content": "kmr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256100": {
+ "content": "lao_Laoo",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256101": {
+ "content": "lvs_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256102": {
+ "content": "lij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256103": {
+ "content": "lim_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256104": {
+ "content": "lin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256105": {
+ "content": "lit_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256106": {
+ "content": "lmo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256107": {
+ "content": "ltg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256108": {
+ "content": "ltz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256109": {
+ "content": "lua_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256110": {
+ "content": "lug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256111": {
+ "content": "luo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256112": {
+ "content": "lus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256113": {
+ "content": "mag_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256114": {
+ "content": "mai_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256115": {
+ "content": "mal_Mlym",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256116": {
+ "content": "mar_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256117": {
+ "content": "min_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256118": {
+ "content": "mkd_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256119": {
+ "content": "plt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256120": {
+ "content": "mlt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256121": {
+ "content": "mni_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256122": {
+ "content": "khk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256123": {
+ "content": "mos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256124": {
+ "content": "mri_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256125": {
+ "content": "zsm_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256126": {
+ "content": "mya_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256127": {
+ "content": "nld_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256128": {
+ "content": "nno_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256129": {
+ "content": "nob_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256130": {
+ "content": "npi_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256131": {
+ "content": "nso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256132": {
+ "content": "nus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256133": {
+ "content": "nya_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256134": {
+ "content": "oci_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256135": {
+ "content": "gaz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256136": {
+ "content": "ory_Orya",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256137": {
+ "content": "pag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256138": {
+ "content": "pan_Guru",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256139": {
+ "content": "pap_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256140": {
+ "content": "pol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256141": {
+ "content": "por_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256142": {
+ "content": "prs_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256143": {
+ "content": "pbt_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256144": {
+ "content": "quy_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256145": {
+ "content": "ron_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256146": {
+ "content": "run_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256147": {
+ "content": "rus_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256148": {
+ "content": "sag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256149": {
+ "content": "san_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256150": {
+ "content": "sat_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256151": {
+ "content": "scn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256152": {
+ "content": "shn_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256153": {
+ "content": "sin_Sinh",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256154": {
+ "content": "slk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256155": {
+ "content": "slv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256156": {
+ "content": "smo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256157": {
+ "content": "sna_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256158": {
+ "content": "snd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256159": {
+ "content": "som_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256160": {
+ "content": "sot_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256161": {
+ "content": "spa_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256162": {
+ "content": "als_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256163": {
+ "content": "srd_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256164": {
+ "content": "srp_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256165": {
+ "content": "ssw_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256166": {
+ "content": "sun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256167": {
+ "content": "swe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256168": {
+ "content": "swh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256169": {
+ "content": "szl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256170": {
+ "content": "tam_Taml",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256171": {
+ "content": "tat_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256172": {
+ "content": "tel_Telu",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256173": {
+ "content": "tgk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256174": {
+ "content": "tgl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256175": {
+ "content": "tha_Thai",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256176": {
+ "content": "tir_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256177": {
+ "content": "taq_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256178": {
+ "content": "taq_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256179": {
+ "content": "tpi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256180": {
+ "content": "tsn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256181": {
+ "content": "tso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256182": {
+ "content": "tuk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256183": {
+ "content": "tum_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256184": {
+ "content": "tur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256185": {
+ "content": "twi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256186": {
+ "content": "tzm_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256187": {
+ "content": "uig_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256188": {
+ "content": "ukr_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256189": {
+ "content": "umb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256190": {
+ "content": "urd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256191": {
+ "content": "uzn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256192": {
+ "content": "vec_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256193": {
+ "content": "vie_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256194": {
+ "content": "war_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256195": {
+ "content": "wol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256196": {
+ "content": "xho_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256197": {
+ "content": "ydd_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256198": {
+ "content": "yor_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256199": {
+ "content": "yue_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256200": {
+ "content": "zho_Hans",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256201": {
+ "content": "zho_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256202": {
+ "content": "zul_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256203": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "legacy_behaviour": false,
+ "mask_token": "",
+ "model_max_length": 1024,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "src_lang": "eng_Latn",
+ "tgt_lang": null,
+ "tokenizer_class": "NllbTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-1000/trainer_state.json b/checkpoint-1000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b06914c3723edc4681e0d1d9ed0a7a9858af309e
--- /dev/null
+++ b/checkpoint-1000/trainer_state.json
@@ -0,0 +1,7033 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0002813291238162,
+ "eval_steps": 500,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001000281329123816,
+ "grad_norm": 5.902005195617676,
+ "learning_rate": 0.0001999999450590425,
+ "loss": 3.1875,
+ "step": 1
+ },
+ {
+ "epoch": 0.002000562658247632,
+ "grad_norm": 3.2577760219573975,
+ "learning_rate": 0.00019999978023623033,
+ "loss": 2.3666,
+ "step": 2
+ },
+ {
+ "epoch": 0.003000843987371448,
+ "grad_norm": 5.3700995445251465,
+ "learning_rate": 0.0001999995055317446,
+ "loss": 2.8282,
+ "step": 3
+ },
+ {
+ "epoch": 0.004001125316495264,
+ "grad_norm": 2.1445534229278564,
+ "learning_rate": 0.00019999912094588717,
+ "loss": 2.2322,
+ "step": 4
+ },
+ {
+ "epoch": 0.005001406645619081,
+ "grad_norm": 1.5143821239471436,
+ "learning_rate": 0.00019999862647908064,
+ "loss": 2.1709,
+ "step": 5
+ },
+ {
+ "epoch": 0.006001687974742896,
+ "grad_norm": 2.0491714477539062,
+ "learning_rate": 0.00019999802213186834,
+ "loss": 2.2863,
+ "step": 6
+ },
+ {
+ "epoch": 0.007001969303866712,
+ "grad_norm": 1.2016857862472534,
+ "learning_rate": 0.0001999973079049143,
+ "loss": 1.5595,
+ "step": 7
+ },
+ {
+ "epoch": 0.008002250632990529,
+ "grad_norm": 1.3860406875610352,
+ "learning_rate": 0.00019999648379900338,
+ "loss": 1.7264,
+ "step": 8
+ },
+ {
+ "epoch": 0.009002531962114344,
+ "grad_norm": 1.0861930847167969,
+ "learning_rate": 0.0001999955498150411,
+ "loss": 2.0533,
+ "step": 9
+ },
+ {
+ "epoch": 0.010002813291238161,
+ "grad_norm": 2.233243703842163,
+ "learning_rate": 0.00019999450595405374,
+ "loss": 1.9378,
+ "step": 10
+ },
+ {
+ "epoch": 0.011003094620361977,
+ "grad_norm": 1.302808165550232,
+ "learning_rate": 0.0001999933522171883,
+ "loss": 1.9182,
+ "step": 11
+ },
+ {
+ "epoch": 0.012003375949485792,
+ "grad_norm": 0.8285257816314697,
+ "learning_rate": 0.00019999208860571255,
+ "loss": 1.9146,
+ "step": 12
+ },
+ {
+ "epoch": 0.01300365727860961,
+ "grad_norm": 1.2248319387435913,
+ "learning_rate": 0.00019999071512101496,
+ "loss": 1.7467,
+ "step": 13
+ },
+ {
+ "epoch": 0.014003938607733425,
+ "grad_norm": 0.8307135105133057,
+ "learning_rate": 0.00019998923176460474,
+ "loss": 1.6896,
+ "step": 14
+ },
+ {
+ "epoch": 0.01500421993685724,
+ "grad_norm": 1.1531301736831665,
+ "learning_rate": 0.00019998763853811184,
+ "loss": 1.7549,
+ "step": 15
+ },
+ {
+ "epoch": 0.016004501265981057,
+ "grad_norm": 1.0071958303451538,
+ "learning_rate": 0.00019998593544328692,
+ "loss": 1.903,
+ "step": 16
+ },
+ {
+ "epoch": 0.017004782595104875,
+ "grad_norm": 0.9111937284469604,
+ "learning_rate": 0.00019998412248200138,
+ "loss": 1.8372,
+ "step": 17
+ },
+ {
+ "epoch": 0.01800506392422869,
+ "grad_norm": 0.9943836331367493,
+ "learning_rate": 0.00019998219965624734,
+ "loss": 1.7304,
+ "step": 18
+ },
+ {
+ "epoch": 0.019005345253352506,
+ "grad_norm": 0.8139007687568665,
+ "learning_rate": 0.0001999801669681376,
+ "loss": 1.6932,
+ "step": 19
+ },
+ {
+ "epoch": 0.020005626582476323,
+ "grad_norm": 0.7991273999214172,
+ "learning_rate": 0.00019997802441990573,
+ "loss": 1.9596,
+ "step": 20
+ },
+ {
+ "epoch": 0.021005907911600136,
+ "grad_norm": 0.832266628742218,
+ "learning_rate": 0.00019997577201390606,
+ "loss": 1.7116,
+ "step": 21
+ },
+ {
+ "epoch": 0.022006189240723954,
+ "grad_norm": 0.8465655446052551,
+ "learning_rate": 0.00019997340975261353,
+ "loss": 1.7711,
+ "step": 22
+ },
+ {
+ "epoch": 0.02300647056984777,
+ "grad_norm": 1.032426118850708,
+ "learning_rate": 0.00019997093763862383,
+ "loss": 1.6746,
+ "step": 23
+ },
+ {
+ "epoch": 0.024006751898971584,
+ "grad_norm": 1.0036743879318237,
+ "learning_rate": 0.0001999683556746534,
+ "loss": 1.7274,
+ "step": 24
+ },
+ {
+ "epoch": 0.0250070332280954,
+ "grad_norm": 0.9491412043571472,
+ "learning_rate": 0.0001999656638635393,
+ "loss": 2.0302,
+ "step": 25
+ },
+ {
+ "epoch": 0.02600731455721922,
+ "grad_norm": 0.9477822184562683,
+ "learning_rate": 0.0001999628622082394,
+ "loss": 1.6107,
+ "step": 26
+ },
+ {
+ "epoch": 0.027007595886343033,
+ "grad_norm": 1.0687041282653809,
+ "learning_rate": 0.0001999599507118322,
+ "loss": 1.8225,
+ "step": 27
+ },
+ {
+ "epoch": 0.02800787721546685,
+ "grad_norm": 1.6572712659835815,
+ "learning_rate": 0.00019995692937751683,
+ "loss": 1.896,
+ "step": 28
+ },
+ {
+ "epoch": 0.029008158544590667,
+ "grad_norm": 1.013258695602417,
+ "learning_rate": 0.0001999537982086133,
+ "loss": 1.7847,
+ "step": 29
+ },
+ {
+ "epoch": 0.03000843987371448,
+ "grad_norm": 0.7584932446479797,
+ "learning_rate": 0.00019995055720856218,
+ "loss": 1.5841,
+ "step": 30
+ },
+ {
+ "epoch": 0.031008721202838298,
+ "grad_norm": 1.1543537378311157,
+ "learning_rate": 0.00019994720638092468,
+ "loss": 1.8362,
+ "step": 31
+ },
+ {
+ "epoch": 0.032009002531962115,
+ "grad_norm": 0.8389608860015869,
+ "learning_rate": 0.00019994374572938277,
+ "loss": 1.7913,
+ "step": 32
+ },
+ {
+ "epoch": 0.03300928386108593,
+ "grad_norm": 0.7582125663757324,
+ "learning_rate": 0.00019994017525773913,
+ "loss": 1.5406,
+ "step": 33
+ },
+ {
+ "epoch": 0.03400956519020975,
+ "grad_norm": 0.7866935133934021,
+ "learning_rate": 0.00019993649496991705,
+ "loss": 1.5363,
+ "step": 34
+ },
+ {
+ "epoch": 0.03500984651933356,
+ "grad_norm": 0.8007768988609314,
+ "learning_rate": 0.00019993270486996046,
+ "loss": 1.7597,
+ "step": 35
+ },
+ {
+ "epoch": 0.03601012784845738,
+ "grad_norm": 0.8109031319618225,
+ "learning_rate": 0.000199928804962034,
+ "loss": 1.5554,
+ "step": 36
+ },
+ {
+ "epoch": 0.037010409177581194,
+ "grad_norm": 0.7722628116607666,
+ "learning_rate": 0.00019992479525042303,
+ "loss": 1.6437,
+ "step": 37
+ },
+ {
+ "epoch": 0.03801069050670501,
+ "grad_norm": 0.7336480021476746,
+ "learning_rate": 0.00019992067573953342,
+ "loss": 1.7276,
+ "step": 38
+ },
+ {
+ "epoch": 0.03901097183582883,
+ "grad_norm": 0.6940280795097351,
+ "learning_rate": 0.0001999164464338918,
+ "loss": 1.846,
+ "step": 39
+ },
+ {
+ "epoch": 0.040011253164952645,
+ "grad_norm": 0.7079702615737915,
+ "learning_rate": 0.0001999121073381454,
+ "loss": 1.7017,
+ "step": 40
+ },
+ {
+ "epoch": 0.041011534494076456,
+ "grad_norm": 0.7438498139381409,
+ "learning_rate": 0.0001999076584570621,
+ "loss": 1.665,
+ "step": 41
+ },
+ {
+ "epoch": 0.04201181582320027,
+ "grad_norm": 0.6951525211334229,
+ "learning_rate": 0.00019990309979553045,
+ "loss": 1.588,
+ "step": 42
+ },
+ {
+ "epoch": 0.04301209715232409,
+ "grad_norm": 0.9398604035377502,
+ "learning_rate": 0.00019989843135855958,
+ "loss": 1.6513,
+ "step": 43
+ },
+ {
+ "epoch": 0.04401237848144791,
+ "grad_norm": 0.7384347319602966,
+ "learning_rate": 0.00019989365315127922,
+ "loss": 1.5975,
+ "step": 44
+ },
+ {
+ "epoch": 0.045012659810571724,
+ "grad_norm": 0.9856846332550049,
+ "learning_rate": 0.0001998887651789398,
+ "loss": 1.644,
+ "step": 45
+ },
+ {
+ "epoch": 0.04601294113969554,
+ "grad_norm": 0.7322820425033569,
+ "learning_rate": 0.0001998837674469123,
+ "loss": 1.5207,
+ "step": 46
+ },
+ {
+ "epoch": 0.04701322246881936,
+ "grad_norm": 0.8695257902145386,
+ "learning_rate": 0.00019987865996068833,
+ "loss": 1.5572,
+ "step": 47
+ },
+ {
+ "epoch": 0.04801350379794317,
+ "grad_norm": 0.7231017351150513,
+ "learning_rate": 0.00019987344272588006,
+ "loss": 1.5841,
+ "step": 48
+ },
+ {
+ "epoch": 0.049013785127066986,
+ "grad_norm": 0.7147384285926819,
+ "learning_rate": 0.00019986811574822033,
+ "loss": 1.8628,
+ "step": 49
+ },
+ {
+ "epoch": 0.0500140664561908,
+ "grad_norm": 0.8631477355957031,
+ "learning_rate": 0.00019986267903356254,
+ "loss": 1.8487,
+ "step": 50
+ },
+ {
+ "epoch": 0.05101434778531462,
+ "grad_norm": 0.7995486855506897,
+ "learning_rate": 0.0001998571325878806,
+ "loss": 1.6491,
+ "step": 51
+ },
+ {
+ "epoch": 0.05201462911443844,
+ "grad_norm": 0.7828657031059265,
+ "learning_rate": 0.0001998514764172691,
+ "loss": 1.7496,
+ "step": 52
+ },
+ {
+ "epoch": 0.053014910443562255,
+ "grad_norm": 0.7789833545684814,
+ "learning_rate": 0.00019984571052794313,
+ "loss": 1.6628,
+ "step": 53
+ },
+ {
+ "epoch": 0.054015191772686065,
+ "grad_norm": 0.7077661752700806,
+ "learning_rate": 0.00019983983492623833,
+ "loss": 1.771,
+ "step": 54
+ },
+ {
+ "epoch": 0.05501547310180988,
+ "grad_norm": 0.7939582467079163,
+ "learning_rate": 0.00019983384961861096,
+ "loss": 1.707,
+ "step": 55
+ },
+ {
+ "epoch": 0.0560157544309337,
+ "grad_norm": 0.9438828229904175,
+ "learning_rate": 0.0001998277546116378,
+ "loss": 1.8334,
+ "step": 56
+ },
+ {
+ "epoch": 0.05701603576005752,
+ "grad_norm": 0.8028286695480347,
+ "learning_rate": 0.00019982154991201608,
+ "loss": 1.9117,
+ "step": 57
+ },
+ {
+ "epoch": 0.058016317089181334,
+ "grad_norm": 0.6563037037849426,
+ "learning_rate": 0.00019981523552656377,
+ "loss": 1.4767,
+ "step": 58
+ },
+ {
+ "epoch": 0.05901659841830515,
+ "grad_norm": 0.6600964665412903,
+ "learning_rate": 0.00019980881146221914,
+ "loss": 1.6656,
+ "step": 59
+ },
+ {
+ "epoch": 0.06001687974742896,
+ "grad_norm": 0.7966578602790833,
+ "learning_rate": 0.00019980227772604112,
+ "loss": 1.4844,
+ "step": 60
+ },
+ {
+ "epoch": 0.06101716107655278,
+ "grad_norm": 0.8019976615905762,
+ "learning_rate": 0.0001997956343252091,
+ "loss": 1.5682,
+ "step": 61
+ },
+ {
+ "epoch": 0.062017442405676595,
+ "grad_norm": 0.8935349583625793,
+ "learning_rate": 0.00019978888126702296,
+ "loss": 1.8131,
+ "step": 62
+ },
+ {
+ "epoch": 0.06301772373480041,
+ "grad_norm": 0.8085179924964905,
+ "learning_rate": 0.00019978201855890308,
+ "loss": 1.5602,
+ "step": 63
+ },
+ {
+ "epoch": 0.06401800506392423,
+ "grad_norm": 0.7631951570510864,
+ "learning_rate": 0.00019977504620839035,
+ "loss": 1.8008,
+ "step": 64
+ },
+ {
+ "epoch": 0.06501828639304805,
+ "grad_norm": 0.7315165996551514,
+ "learning_rate": 0.00019976796422314615,
+ "loss": 1.5735,
+ "step": 65
+ },
+ {
+ "epoch": 0.06601856772217186,
+ "grad_norm": 0.745726466178894,
+ "learning_rate": 0.00019976077261095226,
+ "loss": 1.5775,
+ "step": 66
+ },
+ {
+ "epoch": 0.06701884905129568,
+ "grad_norm": 0.9082249999046326,
+ "learning_rate": 0.00019975347137971098,
+ "loss": 1.7427,
+ "step": 67
+ },
+ {
+ "epoch": 0.0680191303804195,
+ "grad_norm": 0.6575669050216675,
+ "learning_rate": 0.00019974606053744503,
+ "loss": 1.5231,
+ "step": 68
+ },
+ {
+ "epoch": 0.06901941170954332,
+ "grad_norm": 0.7749233245849609,
+ "learning_rate": 0.00019973854009229763,
+ "loss": 1.5703,
+ "step": 69
+ },
+ {
+ "epoch": 0.07001969303866712,
+ "grad_norm": 0.7240824699401855,
+ "learning_rate": 0.00019973091005253232,
+ "loss": 1.5197,
+ "step": 70
+ },
+ {
+ "epoch": 0.07101997436779094,
+ "grad_norm": 0.8683856725692749,
+ "learning_rate": 0.0001997231704265332,
+ "loss": 1.6183,
+ "step": 71
+ },
+ {
+ "epoch": 0.07202025569691475,
+ "grad_norm": 0.6885640621185303,
+ "learning_rate": 0.00019971532122280464,
+ "loss": 1.6565,
+ "step": 72
+ },
+ {
+ "epoch": 0.07302053702603857,
+ "grad_norm": 0.6648329496383667,
+ "learning_rate": 0.0001997073624499716,
+ "loss": 1.5943,
+ "step": 73
+ },
+ {
+ "epoch": 0.07402081835516239,
+ "grad_norm": 0.8867416977882385,
+ "learning_rate": 0.0001996992941167792,
+ "loss": 1.7855,
+ "step": 74
+ },
+ {
+ "epoch": 0.0750210996842862,
+ "grad_norm": 0.7790491580963135,
+ "learning_rate": 0.00019969111623209323,
+ "loss": 1.6723,
+ "step": 75
+ },
+ {
+ "epoch": 0.07602138101341002,
+ "grad_norm": 0.7999201416969299,
+ "learning_rate": 0.00019968282880489957,
+ "loss": 1.5619,
+ "step": 76
+ },
+ {
+ "epoch": 0.07702166234253384,
+ "grad_norm": 0.6316407322883606,
+ "learning_rate": 0.00019967443184430467,
+ "loss": 1.6377,
+ "step": 77
+ },
+ {
+ "epoch": 0.07802194367165766,
+ "grad_norm": 0.7680445313453674,
+ "learning_rate": 0.0001996659253595353,
+ "loss": 1.5433,
+ "step": 78
+ },
+ {
+ "epoch": 0.07902222500078147,
+ "grad_norm": 0.7158446907997131,
+ "learning_rate": 0.0001996573093599385,
+ "loss": 1.5436,
+ "step": 79
+ },
+ {
+ "epoch": 0.08002250632990529,
+ "grad_norm": 0.7354825139045715,
+ "learning_rate": 0.00019964858385498172,
+ "loss": 1.6512,
+ "step": 80
+ },
+ {
+ "epoch": 0.08102278765902911,
+ "grad_norm": 0.7031210660934448,
+ "learning_rate": 0.00019963974885425266,
+ "loss": 1.6411,
+ "step": 81
+ },
+ {
+ "epoch": 0.08202306898815291,
+ "grad_norm": 0.8451045751571655,
+ "learning_rate": 0.00019963080436745945,
+ "loss": 1.6622,
+ "step": 82
+ },
+ {
+ "epoch": 0.08302335031727673,
+ "grad_norm": 0.8329521417617798,
+ "learning_rate": 0.00019962175040443044,
+ "loss": 1.9269,
+ "step": 83
+ },
+ {
+ "epoch": 0.08402363164640055,
+ "grad_norm": 0.6967645883560181,
+ "learning_rate": 0.0001996125869751143,
+ "loss": 1.7243,
+ "step": 84
+ },
+ {
+ "epoch": 0.08502391297552436,
+ "grad_norm": 0.8699042797088623,
+ "learning_rate": 0.00019960331408957997,
+ "loss": 1.7211,
+ "step": 85
+ },
+ {
+ "epoch": 0.08602419430464818,
+ "grad_norm": 0.6780512928962708,
+ "learning_rate": 0.00019959393175801671,
+ "loss": 1.6376,
+ "step": 86
+ },
+ {
+ "epoch": 0.087024475633772,
+ "grad_norm": 0.7213720679283142,
+ "learning_rate": 0.00019958443999073397,
+ "loss": 1.6048,
+ "step": 87
+ },
+ {
+ "epoch": 0.08802475696289581,
+ "grad_norm": 0.6077585816383362,
+ "learning_rate": 0.00019957483879816151,
+ "loss": 1.5231,
+ "step": 88
+ },
+ {
+ "epoch": 0.08902503829201963,
+ "grad_norm": 0.6854611039161682,
+ "learning_rate": 0.00019956512819084928,
+ "loss": 1.4726,
+ "step": 89
+ },
+ {
+ "epoch": 0.09002531962114345,
+ "grad_norm": 0.6969390511512756,
+ "learning_rate": 0.00019955530817946748,
+ "loss": 1.6435,
+ "step": 90
+ },
+ {
+ "epoch": 0.09102560095026727,
+ "grad_norm": 0.7178792953491211,
+ "learning_rate": 0.00019954537877480655,
+ "loss": 1.6967,
+ "step": 91
+ },
+ {
+ "epoch": 0.09202588227939108,
+ "grad_norm": 0.8248458504676819,
+ "learning_rate": 0.00019953533998777706,
+ "loss": 1.5884,
+ "step": 92
+ },
+ {
+ "epoch": 0.0930261636085149,
+ "grad_norm": 0.6472075581550598,
+ "learning_rate": 0.00019952519182940993,
+ "loss": 1.5074,
+ "step": 93
+ },
+ {
+ "epoch": 0.09402644493763872,
+ "grad_norm": 0.7548672556877136,
+ "learning_rate": 0.00019951493431085603,
+ "loss": 1.6934,
+ "step": 94
+ },
+ {
+ "epoch": 0.09502672626676252,
+ "grad_norm": 0.6680666208267212,
+ "learning_rate": 0.00019950456744338658,
+ "loss": 1.4014,
+ "step": 95
+ },
+ {
+ "epoch": 0.09602700759588634,
+ "grad_norm": 0.7270862460136414,
+ "learning_rate": 0.00019949409123839288,
+ "loss": 1.6991,
+ "step": 96
+ },
+ {
+ "epoch": 0.09702728892501016,
+ "grad_norm": 0.682833731174469,
+ "learning_rate": 0.00019948350570738642,
+ "loss": 1.4926,
+ "step": 97
+ },
+ {
+ "epoch": 0.09802757025413397,
+ "grad_norm": 0.6598315238952637,
+ "learning_rate": 0.0001994728108619987,
+ "loss": 1.6329,
+ "step": 98
+ },
+ {
+ "epoch": 0.09902785158325779,
+ "grad_norm": 0.830845832824707,
+ "learning_rate": 0.0001994620067139815,
+ "loss": 1.8517,
+ "step": 99
+ },
+ {
+ "epoch": 0.1000281329123816,
+ "grad_norm": 0.6964694857597351,
+ "learning_rate": 0.00019945109327520658,
+ "loss": 1.5459,
+ "step": 100
+ },
+ {
+ "epoch": 0.10102841424150542,
+ "grad_norm": 0.8640177249908447,
+ "learning_rate": 0.00019944007055766586,
+ "loss": 1.6638,
+ "step": 101
+ },
+ {
+ "epoch": 0.10202869557062924,
+ "grad_norm": 0.6485210657119751,
+ "learning_rate": 0.00019942893857347128,
+ "loss": 1.8025,
+ "step": 102
+ },
+ {
+ "epoch": 0.10302897689975306,
+ "grad_norm": 0.6746248006820679,
+ "learning_rate": 0.00019941769733485494,
+ "loss": 1.6954,
+ "step": 103
+ },
+ {
+ "epoch": 0.10402925822887688,
+ "grad_norm": 0.7386549115180969,
+ "learning_rate": 0.00019940634685416888,
+ "loss": 1.4547,
+ "step": 104
+ },
+ {
+ "epoch": 0.10502953955800069,
+ "grad_norm": 0.7518633008003235,
+ "learning_rate": 0.00019939488714388524,
+ "loss": 1.5098,
+ "step": 105
+ },
+ {
+ "epoch": 0.10602982088712451,
+ "grad_norm": 0.7350422739982605,
+ "learning_rate": 0.00019938331821659614,
+ "loss": 1.5452,
+ "step": 106
+ },
+ {
+ "epoch": 0.10703010221624833,
+ "grad_norm": 0.6544668674468994,
+ "learning_rate": 0.0001993716400850138,
+ "loss": 1.5106,
+ "step": 107
+ },
+ {
+ "epoch": 0.10803038354537213,
+ "grad_norm": 0.6108564138412476,
+ "learning_rate": 0.0001993598527619703,
+ "loss": 1.5818,
+ "step": 108
+ },
+ {
+ "epoch": 0.10903066487449595,
+ "grad_norm": 0.731071949005127,
+ "learning_rate": 0.00019934795626041783,
+ "loss": 1.4819,
+ "step": 109
+ },
+ {
+ "epoch": 0.11003094620361976,
+ "grad_norm": 0.5978986620903015,
+ "learning_rate": 0.0001993359505934285,
+ "loss": 1.5469,
+ "step": 110
+ },
+ {
+ "epoch": 0.11103122753274358,
+ "grad_norm": 0.7249881029129028,
+ "learning_rate": 0.00019932383577419432,
+ "loss": 1.7466,
+ "step": 111
+ },
+ {
+ "epoch": 0.1120315088618674,
+ "grad_norm": 0.6161806583404541,
+ "learning_rate": 0.0001993116118160273,
+ "loss": 1.3411,
+ "step": 112
+ },
+ {
+ "epoch": 0.11303179019099122,
+ "grad_norm": 0.6745229363441467,
+ "learning_rate": 0.00019929927873235938,
+ "loss": 1.5615,
+ "step": 113
+ },
+ {
+ "epoch": 0.11403207152011503,
+ "grad_norm": 0.6489872336387634,
+ "learning_rate": 0.00019928683653674237,
+ "loss": 1.6279,
+ "step": 114
+ },
+ {
+ "epoch": 0.11503235284923885,
+ "grad_norm": 0.7769975662231445,
+ "learning_rate": 0.00019927428524284805,
+ "loss": 1.6155,
+ "step": 115
+ },
+ {
+ "epoch": 0.11603263417836267,
+ "grad_norm": 0.734336793422699,
+ "learning_rate": 0.00019926162486446792,
+ "loss": 1.6837,
+ "step": 116
+ },
+ {
+ "epoch": 0.11703291550748648,
+ "grad_norm": 0.6966903805732727,
+ "learning_rate": 0.0001992488554155135,
+ "loss": 1.6659,
+ "step": 117
+ },
+ {
+ "epoch": 0.1180331968366103,
+ "grad_norm": 0.6714586615562439,
+ "learning_rate": 0.00019923597691001615,
+ "loss": 1.5161,
+ "step": 118
+ },
+ {
+ "epoch": 0.11903347816573412,
+ "grad_norm": 0.6390894651412964,
+ "learning_rate": 0.0001992229893621269,
+ "loss": 1.4561,
+ "step": 119
+ },
+ {
+ "epoch": 0.12003375949485792,
+ "grad_norm": 0.6481143832206726,
+ "learning_rate": 0.00019920989278611687,
+ "loss": 1.6331,
+ "step": 120
+ },
+ {
+ "epoch": 0.12103404082398174,
+ "grad_norm": 0.6819384694099426,
+ "learning_rate": 0.0001991966871963767,
+ "loss": 1.6508,
+ "step": 121
+ },
+ {
+ "epoch": 0.12203432215310556,
+ "grad_norm": 0.6839059591293335,
+ "learning_rate": 0.000199183372607417,
+ "loss": 1.6514,
+ "step": 122
+ },
+ {
+ "epoch": 0.12303460348222937,
+ "grad_norm": 0.6401050090789795,
+ "learning_rate": 0.0001991699490338681,
+ "loss": 1.8065,
+ "step": 123
+ },
+ {
+ "epoch": 0.12403488481135319,
+ "grad_norm": 0.6860588788986206,
+ "learning_rate": 0.00019915641649048005,
+ "loss": 1.7658,
+ "step": 124
+ },
+ {
+ "epoch": 0.12503516614047702,
+ "grad_norm": 0.6286434531211853,
+ "learning_rate": 0.0001991427749921227,
+ "loss": 1.7678,
+ "step": 125
+ },
+ {
+ "epoch": 0.12603544746960083,
+ "grad_norm": 0.6609922647476196,
+ "learning_rate": 0.00019912902455378556,
+ "loss": 1.4934,
+ "step": 126
+ },
+ {
+ "epoch": 0.12703572879872463,
+ "grad_norm": 0.7058399319648743,
+ "learning_rate": 0.00019911516519057788,
+ "loss": 1.6058,
+ "step": 127
+ },
+ {
+ "epoch": 0.12803601012784846,
+ "grad_norm": 0.6362051963806152,
+ "learning_rate": 0.00019910119691772863,
+ "loss": 1.502,
+ "step": 128
+ },
+ {
+ "epoch": 0.12903629145697226,
+ "grad_norm": 0.7493100762367249,
+ "learning_rate": 0.00019908711975058637,
+ "loss": 1.5287,
+ "step": 129
+ },
+ {
+ "epoch": 0.1300365727860961,
+ "grad_norm": 0.6492393612861633,
+ "learning_rate": 0.0001990729337046194,
+ "loss": 1.5716,
+ "step": 130
+ },
+ {
+ "epoch": 0.1310368541152199,
+ "grad_norm": 0.70331871509552,
+ "learning_rate": 0.0001990586387954156,
+ "loss": 1.5882,
+ "step": 131
+ },
+ {
+ "epoch": 0.13203713544434373,
+ "grad_norm": 0.7581572532653809,
+ "learning_rate": 0.00019904423503868247,
+ "loss": 1.7627,
+ "step": 132
+ },
+ {
+ "epoch": 0.13303741677346753,
+ "grad_norm": 0.7087228894233704,
+ "learning_rate": 0.00019902972245024715,
+ "loss": 1.6257,
+ "step": 133
+ },
+ {
+ "epoch": 0.13403769810259136,
+ "grad_norm": 0.7920627593994141,
+ "learning_rate": 0.00019901510104605637,
+ "loss": 1.572,
+ "step": 134
+ },
+ {
+ "epoch": 0.13503797943171517,
+ "grad_norm": 0.6869202256202698,
+ "learning_rate": 0.00019900037084217637,
+ "loss": 1.5478,
+ "step": 135
+ },
+ {
+ "epoch": 0.136038260760839,
+ "grad_norm": 0.6879409551620483,
+ "learning_rate": 0.00019898553185479303,
+ "loss": 1.3104,
+ "step": 136
+ },
+ {
+ "epoch": 0.1370385420899628,
+ "grad_norm": 0.6574143767356873,
+ "learning_rate": 0.00019897058410021167,
+ "loss": 1.7041,
+ "step": 137
+ },
+ {
+ "epoch": 0.13803882341908663,
+ "grad_norm": 0.7793259620666504,
+ "learning_rate": 0.00019895552759485722,
+ "loss": 1.5417,
+ "step": 138
+ },
+ {
+ "epoch": 0.13903910474821043,
+ "grad_norm": 0.6310438513755798,
+ "learning_rate": 0.00019894036235527395,
+ "loss": 1.4978,
+ "step": 139
+ },
+ {
+ "epoch": 0.14003938607733424,
+ "grad_norm": 0.6298012137413025,
+ "learning_rate": 0.00019892508839812584,
+ "loss": 1.5367,
+ "step": 140
+ },
+ {
+ "epoch": 0.14103966740645807,
+ "grad_norm": 0.5647856593132019,
+ "learning_rate": 0.00019890970574019617,
+ "loss": 1.537,
+ "step": 141
+ },
+ {
+ "epoch": 0.14203994873558187,
+ "grad_norm": 0.6491876244544983,
+ "learning_rate": 0.00019889421439838763,
+ "loss": 1.6992,
+ "step": 142
+ },
+ {
+ "epoch": 0.1430402300647057,
+ "grad_norm": 0.6574720144271851,
+ "learning_rate": 0.00019887861438972246,
+ "loss": 1.3837,
+ "step": 143
+ },
+ {
+ "epoch": 0.1440405113938295,
+ "grad_norm": 0.6267092227935791,
+ "learning_rate": 0.00019886290573134228,
+ "loss": 1.6307,
+ "step": 144
+ },
+ {
+ "epoch": 0.14504079272295334,
+ "grad_norm": 0.6785029172897339,
+ "learning_rate": 0.000198847088440508,
+ "loss": 1.574,
+ "step": 145
+ },
+ {
+ "epoch": 0.14604107405207714,
+ "grad_norm": 0.6218644380569458,
+ "learning_rate": 0.0001988311625346,
+ "loss": 1.4676,
+ "step": 146
+ },
+ {
+ "epoch": 0.14704135538120097,
+ "grad_norm": 0.6047986745834351,
+ "learning_rate": 0.00019881512803111796,
+ "loss": 1.4316,
+ "step": 147
+ },
+ {
+ "epoch": 0.14804163671032478,
+ "grad_norm": 0.7340937256813049,
+ "learning_rate": 0.00019879898494768093,
+ "loss": 1.5185,
+ "step": 148
+ },
+ {
+ "epoch": 0.1490419180394486,
+ "grad_norm": 0.5874620676040649,
+ "learning_rate": 0.00019878273330202717,
+ "loss": 1.5031,
+ "step": 149
+ },
+ {
+ "epoch": 0.1500421993685724,
+ "grad_norm": 0.6943556666374207,
+ "learning_rate": 0.00019876637311201433,
+ "loss": 1.7323,
+ "step": 150
+ },
+ {
+ "epoch": 0.15104248069769624,
+ "grad_norm": 0.6345832347869873,
+ "learning_rate": 0.00019874990439561934,
+ "loss": 1.4691,
+ "step": 151
+ },
+ {
+ "epoch": 0.15204276202682004,
+ "grad_norm": 0.7047753930091858,
+ "learning_rate": 0.0001987333271709383,
+ "loss": 1.5198,
+ "step": 152
+ },
+ {
+ "epoch": 0.15304304335594385,
+ "grad_norm": 0.6043322086334229,
+ "learning_rate": 0.00019871664145618657,
+ "loss": 1.5488,
+ "step": 153
+ },
+ {
+ "epoch": 0.15404332468506768,
+ "grad_norm": 0.5978446006774902,
+ "learning_rate": 0.00019869984726969878,
+ "loss": 1.4278,
+ "step": 154
+ },
+ {
+ "epoch": 0.15504360601419148,
+ "grad_norm": 0.6796436905860901,
+ "learning_rate": 0.00019868294462992866,
+ "loss": 1.5845,
+ "step": 155
+ },
+ {
+ "epoch": 0.1560438873433153,
+ "grad_norm": 0.7113372087478638,
+ "learning_rate": 0.00019866593355544922,
+ "loss": 1.7509,
+ "step": 156
+ },
+ {
+ "epoch": 0.15704416867243912,
+ "grad_norm": 0.5908107757568359,
+ "learning_rate": 0.00019864881406495246,
+ "loss": 1.5693,
+ "step": 157
+ },
+ {
+ "epoch": 0.15804445000156295,
+ "grad_norm": 0.7135252952575684,
+ "learning_rate": 0.00019863158617724967,
+ "loss": 1.6109,
+ "step": 158
+ },
+ {
+ "epoch": 0.15904473133068675,
+ "grad_norm": 0.5621710419654846,
+ "learning_rate": 0.00019861424991127115,
+ "loss": 1.5368,
+ "step": 159
+ },
+ {
+ "epoch": 0.16004501265981058,
+ "grad_norm": 0.6205443143844604,
+ "learning_rate": 0.00019859680528606637,
+ "loss": 1.5181,
+ "step": 160
+ },
+ {
+ "epoch": 0.16104529398893438,
+ "grad_norm": 0.6933260560035706,
+ "learning_rate": 0.00019857925232080373,
+ "loss": 1.4508,
+ "step": 161
+ },
+ {
+ "epoch": 0.16204557531805822,
+ "grad_norm": 0.6911661028862,
+ "learning_rate": 0.00019856159103477086,
+ "loss": 1.5423,
+ "step": 162
+ },
+ {
+ "epoch": 0.16304585664718202,
+ "grad_norm": 0.7684744000434875,
+ "learning_rate": 0.00019854382144737426,
+ "loss": 1.4097,
+ "step": 163
+ },
+ {
+ "epoch": 0.16404613797630582,
+ "grad_norm": 0.6657288074493408,
+ "learning_rate": 0.00019852594357813952,
+ "loss": 1.6145,
+ "step": 164
+ },
+ {
+ "epoch": 0.16504641930542965,
+ "grad_norm": 0.7030160427093506,
+ "learning_rate": 0.00019850795744671116,
+ "loss": 1.6551,
+ "step": 165
+ },
+ {
+ "epoch": 0.16604670063455346,
+ "grad_norm": 0.87894207239151,
+ "learning_rate": 0.0001984898630728527,
+ "loss": 1.6316,
+ "step": 166
+ },
+ {
+ "epoch": 0.1670469819636773,
+ "grad_norm": 0.6282681226730347,
+ "learning_rate": 0.0001984716604764466,
+ "loss": 1.451,
+ "step": 167
+ },
+ {
+ "epoch": 0.1680472632928011,
+ "grad_norm": 0.6729792952537537,
+ "learning_rate": 0.0001984533496774942,
+ "loss": 1.4381,
+ "step": 168
+ },
+ {
+ "epoch": 0.16904754462192492,
+ "grad_norm": 0.7300116419792175,
+ "learning_rate": 0.0001984349306961158,
+ "loss": 1.4244,
+ "step": 169
+ },
+ {
+ "epoch": 0.17004782595104873,
+ "grad_norm": 0.6853480935096741,
+ "learning_rate": 0.00019841640355255043,
+ "loss": 1.6174,
+ "step": 170
+ },
+ {
+ "epoch": 0.17104810728017256,
+ "grad_norm": 0.735612690448761,
+ "learning_rate": 0.00019839776826715614,
+ "loss": 1.5085,
+ "step": 171
+ },
+ {
+ "epoch": 0.17204838860929636,
+ "grad_norm": 0.6735563278198242,
+ "learning_rate": 0.00019837902486040978,
+ "loss": 1.507,
+ "step": 172
+ },
+ {
+ "epoch": 0.1730486699384202,
+ "grad_norm": 0.6617917418479919,
+ "learning_rate": 0.0001983601733529069,
+ "loss": 1.6774,
+ "step": 173
+ },
+ {
+ "epoch": 0.174048951267544,
+ "grad_norm": 0.7137823700904846,
+ "learning_rate": 0.00019834121376536187,
+ "loss": 1.4665,
+ "step": 174
+ },
+ {
+ "epoch": 0.17504923259666783,
+ "grad_norm": 0.6372626423835754,
+ "learning_rate": 0.00019832214611860793,
+ "loss": 1.3597,
+ "step": 175
+ },
+ {
+ "epoch": 0.17604951392579163,
+ "grad_norm": 0.7131632566452026,
+ "learning_rate": 0.00019830297043359692,
+ "loss": 1.4833,
+ "step": 176
+ },
+ {
+ "epoch": 0.17704979525491543,
+ "grad_norm": 0.7538559436798096,
+ "learning_rate": 0.00019828368673139947,
+ "loss": 1.4714,
+ "step": 177
+ },
+ {
+ "epoch": 0.17805007658403926,
+ "grad_norm": 0.5684806108474731,
+ "learning_rate": 0.0001982642950332049,
+ "loss": 1.5012,
+ "step": 178
+ },
+ {
+ "epoch": 0.17905035791316307,
+ "grad_norm": 0.621658444404602,
+ "learning_rate": 0.00019824479536032112,
+ "loss": 1.9119,
+ "step": 179
+ },
+ {
+ "epoch": 0.1800506392422869,
+ "grad_norm": 0.6564679741859436,
+ "learning_rate": 0.0001982251877341748,
+ "loss": 1.5131,
+ "step": 180
+ },
+ {
+ "epoch": 0.1810509205714107,
+ "grad_norm": 0.6546526551246643,
+ "learning_rate": 0.00019820547217631117,
+ "loss": 1.4493,
+ "step": 181
+ },
+ {
+ "epoch": 0.18205120190053453,
+ "grad_norm": 0.6504479050636292,
+ "learning_rate": 0.00019818564870839405,
+ "loss": 1.6131,
+ "step": 182
+ },
+ {
+ "epoch": 0.18305148322965833,
+ "grad_norm": 0.6269803047180176,
+ "learning_rate": 0.00019816571735220583,
+ "loss": 1.5936,
+ "step": 183
+ },
+ {
+ "epoch": 0.18405176455878217,
+ "grad_norm": 0.6303942799568176,
+ "learning_rate": 0.00019814567812964748,
+ "loss": 1.6948,
+ "step": 184
+ },
+ {
+ "epoch": 0.18505204588790597,
+ "grad_norm": 0.6562885046005249,
+ "learning_rate": 0.00019812553106273847,
+ "loss": 1.5542,
+ "step": 185
+ },
+ {
+ "epoch": 0.1860523272170298,
+ "grad_norm": 0.5844212174415588,
+ "learning_rate": 0.00019810527617361681,
+ "loss": 1.539,
+ "step": 186
+ },
+ {
+ "epoch": 0.1870526085461536,
+ "grad_norm": 0.6402295231819153,
+ "learning_rate": 0.00019808491348453894,
+ "loss": 1.4748,
+ "step": 187
+ },
+ {
+ "epoch": 0.18805288987527743,
+ "grad_norm": 0.6579477190971375,
+ "learning_rate": 0.00019806444301787978,
+ "loss": 1.5114,
+ "step": 188
+ },
+ {
+ "epoch": 0.18905317120440124,
+ "grad_norm": 0.6511597037315369,
+ "learning_rate": 0.0001980438647961327,
+ "loss": 1.4678,
+ "step": 189
+ },
+ {
+ "epoch": 0.19005345253352504,
+ "grad_norm": 0.6911427974700928,
+ "learning_rate": 0.00019802317884190935,
+ "loss": 1.6876,
+ "step": 190
+ },
+ {
+ "epoch": 0.19105373386264887,
+ "grad_norm": 0.6146433353424072,
+ "learning_rate": 0.00019800238517793996,
+ "loss": 1.5986,
+ "step": 191
+ },
+ {
+ "epoch": 0.19205401519177268,
+ "grad_norm": 0.6126302480697632,
+ "learning_rate": 0.00019798148382707296,
+ "loss": 1.571,
+ "step": 192
+ },
+ {
+ "epoch": 0.1930542965208965,
+ "grad_norm": 0.5751072764396667,
+ "learning_rate": 0.00019796047481227515,
+ "loss": 1.4921,
+ "step": 193
+ },
+ {
+ "epoch": 0.1940545778500203,
+ "grad_norm": 0.6484839916229248,
+ "learning_rate": 0.00019793935815663163,
+ "loss": 1.7495,
+ "step": 194
+ },
+ {
+ "epoch": 0.19505485917914414,
+ "grad_norm": 0.6875973343849182,
+ "learning_rate": 0.00019791813388334581,
+ "loss": 1.5782,
+ "step": 195
+ },
+ {
+ "epoch": 0.19605514050826794,
+ "grad_norm": 0.8130943179130554,
+ "learning_rate": 0.00019789680201573933,
+ "loss": 1.4964,
+ "step": 196
+ },
+ {
+ "epoch": 0.19705542183739178,
+ "grad_norm": 0.6734403371810913,
+ "learning_rate": 0.00019787536257725202,
+ "loss": 1.4787,
+ "step": 197
+ },
+ {
+ "epoch": 0.19805570316651558,
+ "grad_norm": 0.6480582356452942,
+ "learning_rate": 0.00019785381559144196,
+ "loss": 1.5629,
+ "step": 198
+ },
+ {
+ "epoch": 0.1990559844956394,
+ "grad_norm": 0.6554624438285828,
+ "learning_rate": 0.00019783216108198542,
+ "loss": 1.5806,
+ "step": 199
+ },
+ {
+ "epoch": 0.2000562658247632,
+ "grad_norm": 0.705443263053894,
+ "learning_rate": 0.00019781039907267677,
+ "loss": 1.8372,
+ "step": 200
+ },
+ {
+ "epoch": 0.20105654715388704,
+ "grad_norm": 0.706923246383667,
+ "learning_rate": 0.00019778852958742853,
+ "loss": 1.6405,
+ "step": 201
+ },
+ {
+ "epoch": 0.20205682848301085,
+ "grad_norm": 0.7062544822692871,
+ "learning_rate": 0.00019776655265027127,
+ "loss": 1.6,
+ "step": 202
+ },
+ {
+ "epoch": 0.20305710981213465,
+ "grad_norm": 0.7227569222450256,
+ "learning_rate": 0.00019774446828535371,
+ "loss": 1.5172,
+ "step": 203
+ },
+ {
+ "epoch": 0.20405739114125848,
+ "grad_norm": 0.6762563586235046,
+ "learning_rate": 0.00019772227651694256,
+ "loss": 1.6753,
+ "step": 204
+ },
+ {
+ "epoch": 0.20505767247038229,
+ "grad_norm": 0.6048421859741211,
+ "learning_rate": 0.00019769997736942258,
+ "loss": 1.4827,
+ "step": 205
+ },
+ {
+ "epoch": 0.20605795379950612,
+ "grad_norm": 0.6002956032752991,
+ "learning_rate": 0.00019767757086729647,
+ "loss": 1.5438,
+ "step": 206
+ },
+ {
+ "epoch": 0.20705823512862992,
+ "grad_norm": 0.7948954701423645,
+ "learning_rate": 0.00019765505703518496,
+ "loss": 1.4988,
+ "step": 207
+ },
+ {
+ "epoch": 0.20805851645775375,
+ "grad_norm": 0.6495680809020996,
+ "learning_rate": 0.00019763243589782662,
+ "loss": 1.5738,
+ "step": 208
+ },
+ {
+ "epoch": 0.20905879778687755,
+ "grad_norm": 0.6413107514381409,
+ "learning_rate": 0.00019760970748007803,
+ "loss": 1.3794,
+ "step": 209
+ },
+ {
+ "epoch": 0.21005907911600138,
+ "grad_norm": 0.5999665260314941,
+ "learning_rate": 0.0001975868718069136,
+ "loss": 1.4313,
+ "step": 210
+ },
+ {
+ "epoch": 0.2110593604451252,
+ "grad_norm": 0.6355773210525513,
+ "learning_rate": 0.00019756392890342563,
+ "loss": 1.5107,
+ "step": 211
+ },
+ {
+ "epoch": 0.21205964177424902,
+ "grad_norm": 0.6068251729011536,
+ "learning_rate": 0.00019754087879482422,
+ "loss": 1.536,
+ "step": 212
+ },
+ {
+ "epoch": 0.21305992310337282,
+ "grad_norm": 0.5568909049034119,
+ "learning_rate": 0.00019751772150643722,
+ "loss": 1.5372,
+ "step": 213
+ },
+ {
+ "epoch": 0.21406020443249665,
+ "grad_norm": 0.5771281719207764,
+ "learning_rate": 0.00019749445706371038,
+ "loss": 1.487,
+ "step": 214
+ },
+ {
+ "epoch": 0.21506048576162046,
+ "grad_norm": 0.6146671772003174,
+ "learning_rate": 0.00019747108549220702,
+ "loss": 1.4585,
+ "step": 215
+ },
+ {
+ "epoch": 0.21606076709074426,
+ "grad_norm": 0.5595754981040955,
+ "learning_rate": 0.00019744760681760832,
+ "loss": 1.4224,
+ "step": 216
+ },
+ {
+ "epoch": 0.2170610484198681,
+ "grad_norm": 0.5873929858207703,
+ "learning_rate": 0.00019742402106571314,
+ "loss": 1.4581,
+ "step": 217
+ },
+ {
+ "epoch": 0.2180613297489919,
+ "grad_norm": 0.5725668668746948,
+ "learning_rate": 0.00019740032826243788,
+ "loss": 1.4393,
+ "step": 218
+ },
+ {
+ "epoch": 0.21906161107811573,
+ "grad_norm": 0.6452648043632507,
+ "learning_rate": 0.0001973765284338167,
+ "loss": 1.6048,
+ "step": 219
+ },
+ {
+ "epoch": 0.22006189240723953,
+ "grad_norm": 0.6166092753410339,
+ "learning_rate": 0.00019735262160600127,
+ "loss": 1.4976,
+ "step": 220
+ },
+ {
+ "epoch": 0.22106217373636336,
+ "grad_norm": 0.7053269147872925,
+ "learning_rate": 0.00019732860780526088,
+ "loss": 1.6882,
+ "step": 221
+ },
+ {
+ "epoch": 0.22206245506548716,
+ "grad_norm": 0.7072796821594238,
+ "learning_rate": 0.00019730448705798239,
+ "loss": 1.5441,
+ "step": 222
+ },
+ {
+ "epoch": 0.223062736394611,
+ "grad_norm": 0.6704496145248413,
+ "learning_rate": 0.00019728025939067008,
+ "loss": 1.3791,
+ "step": 223
+ },
+ {
+ "epoch": 0.2240630177237348,
+ "grad_norm": 0.6141743659973145,
+ "learning_rate": 0.00019725592482994583,
+ "loss": 1.5831,
+ "step": 224
+ },
+ {
+ "epoch": 0.22506329905285863,
+ "grad_norm": 0.6235673427581787,
+ "learning_rate": 0.00019723148340254892,
+ "loss": 1.6103,
+ "step": 225
+ },
+ {
+ "epoch": 0.22606358038198243,
+ "grad_norm": 0.6383673548698425,
+ "learning_rate": 0.00019720693513533598,
+ "loss": 1.6284,
+ "step": 226
+ },
+ {
+ "epoch": 0.22706386171110624,
+ "grad_norm": 0.7666104435920715,
+ "learning_rate": 0.00019718228005528122,
+ "loss": 1.702,
+ "step": 227
+ },
+ {
+ "epoch": 0.22806414304023007,
+ "grad_norm": 0.6431383490562439,
+ "learning_rate": 0.00019715751818947603,
+ "loss": 1.4571,
+ "step": 228
+ },
+ {
+ "epoch": 0.22906442436935387,
+ "grad_norm": 0.6177626252174377,
+ "learning_rate": 0.0001971326495651293,
+ "loss": 1.4326,
+ "step": 229
+ },
+ {
+ "epoch": 0.2300647056984777,
+ "grad_norm": 0.7352898120880127,
+ "learning_rate": 0.00019710767420956705,
+ "loss": 1.7427,
+ "step": 230
+ },
+ {
+ "epoch": 0.2310649870276015,
+ "grad_norm": 0.6259469389915466,
+ "learning_rate": 0.0001970825921502328,
+ "loss": 1.634,
+ "step": 231
+ },
+ {
+ "epoch": 0.23206526835672533,
+ "grad_norm": 0.6699635982513428,
+ "learning_rate": 0.0001970574034146871,
+ "loss": 1.4705,
+ "step": 232
+ },
+ {
+ "epoch": 0.23306554968584914,
+ "grad_norm": 0.5577033162117004,
+ "learning_rate": 0.00019703210803060782,
+ "loss": 1.5438,
+ "step": 233
+ },
+ {
+ "epoch": 0.23406583101497297,
+ "grad_norm": 0.6063429117202759,
+ "learning_rate": 0.00019700670602579008,
+ "loss": 1.555,
+ "step": 234
+ },
+ {
+ "epoch": 0.23506611234409677,
+ "grad_norm": 0.6069104671478271,
+ "learning_rate": 0.00019698119742814606,
+ "loss": 1.5036,
+ "step": 235
+ },
+ {
+ "epoch": 0.2360663936732206,
+ "grad_norm": 0.6158379316329956,
+ "learning_rate": 0.00019695558226570507,
+ "loss": 1.3741,
+ "step": 236
+ },
+ {
+ "epoch": 0.2370666750023444,
+ "grad_norm": 0.6366294622421265,
+ "learning_rate": 0.00019692986056661356,
+ "loss": 1.4467,
+ "step": 237
+ },
+ {
+ "epoch": 0.23806695633146824,
+ "grad_norm": 0.6726595163345337,
+ "learning_rate": 0.00019690403235913504,
+ "loss": 1.3861,
+ "step": 238
+ },
+ {
+ "epoch": 0.23906723766059204,
+ "grad_norm": 0.6546512842178345,
+ "learning_rate": 0.00019687809767165,
+ "loss": 1.6886,
+ "step": 239
+ },
+ {
+ "epoch": 0.24006751898971584,
+ "grad_norm": 0.6623121500015259,
+ "learning_rate": 0.000196852056532656,
+ "loss": 1.5925,
+ "step": 240
+ },
+ {
+ "epoch": 0.24106780031883968,
+ "grad_norm": 0.6577529311180115,
+ "learning_rate": 0.00019682590897076752,
+ "loss": 1.4509,
+ "step": 241
+ },
+ {
+ "epoch": 0.24206808164796348,
+ "grad_norm": 0.5586327314376831,
+ "learning_rate": 0.00019679965501471608,
+ "loss": 1.6346,
+ "step": 242
+ },
+ {
+ "epoch": 0.2430683629770873,
+ "grad_norm": 0.6459937691688538,
+ "learning_rate": 0.0001967732946933499,
+ "loss": 1.4129,
+ "step": 243
+ },
+ {
+ "epoch": 0.2440686443062111,
+ "grad_norm": 0.778732180595398,
+ "learning_rate": 0.00019674682803563428,
+ "loss": 1.5129,
+ "step": 244
+ },
+ {
+ "epoch": 0.24506892563533494,
+ "grad_norm": 0.7264451384544373,
+ "learning_rate": 0.00019672025507065131,
+ "loss": 1.4483,
+ "step": 245
+ },
+ {
+ "epoch": 0.24606920696445875,
+ "grad_norm": 0.616084635257721,
+ "learning_rate": 0.00019669357582759983,
+ "loss": 1.5947,
+ "step": 246
+ },
+ {
+ "epoch": 0.24706948829358258,
+ "grad_norm": 0.5911642909049988,
+ "learning_rate": 0.00019666679033579552,
+ "loss": 1.6407,
+ "step": 247
+ },
+ {
+ "epoch": 0.24806976962270638,
+ "grad_norm": 0.6102796792984009,
+ "learning_rate": 0.00019663989862467082,
+ "loss": 1.5251,
+ "step": 248
+ },
+ {
+ "epoch": 0.2490700509518302,
+ "grad_norm": 0.5973434448242188,
+ "learning_rate": 0.00019661290072377482,
+ "loss": 1.3969,
+ "step": 249
+ },
+ {
+ "epoch": 0.25007033228095404,
+ "grad_norm": 0.8515523076057434,
+ "learning_rate": 0.00019658579666277334,
+ "loss": 1.5687,
+ "step": 250
+ },
+ {
+ "epoch": 0.2510706136100778,
+ "grad_norm": 0.5003417134284973,
+ "learning_rate": 0.0001965585864714488,
+ "loss": 1.4102,
+ "step": 251
+ },
+ {
+ "epoch": 0.25207089493920165,
+ "grad_norm": 0.5215190052986145,
+ "learning_rate": 0.00019653127017970034,
+ "loss": 1.2471,
+ "step": 252
+ },
+ {
+ "epoch": 0.2530711762683255,
+ "grad_norm": 0.6491619348526001,
+ "learning_rate": 0.0001965038478175436,
+ "loss": 1.6969,
+ "step": 253
+ },
+ {
+ "epoch": 0.25407145759744926,
+ "grad_norm": 0.6176133155822754,
+ "learning_rate": 0.00019647631941511082,
+ "loss": 1.5351,
+ "step": 254
+ },
+ {
+ "epoch": 0.2550717389265731,
+ "grad_norm": 0.6913408041000366,
+ "learning_rate": 0.0001964486850026507,
+ "loss": 1.4309,
+ "step": 255
+ },
+ {
+ "epoch": 0.2560720202556969,
+ "grad_norm": 0.5875718593597412,
+ "learning_rate": 0.00019642094461052852,
+ "loss": 1.4679,
+ "step": 256
+ },
+ {
+ "epoch": 0.25707230158482075,
+ "grad_norm": 0.6682264804840088,
+ "learning_rate": 0.00019639309826922585,
+ "loss": 1.5393,
+ "step": 257
+ },
+ {
+ "epoch": 0.2580725829139445,
+ "grad_norm": 0.7241432666778564,
+ "learning_rate": 0.0001963651460093409,
+ "loss": 1.4998,
+ "step": 258
+ },
+ {
+ "epoch": 0.25907286424306836,
+ "grad_norm": 0.5210353136062622,
+ "learning_rate": 0.00019633708786158806,
+ "loss": 1.3837,
+ "step": 259
+ },
+ {
+ "epoch": 0.2600731455721922,
+ "grad_norm": 0.584020733833313,
+ "learning_rate": 0.00019630892385679818,
+ "loss": 1.4961,
+ "step": 260
+ },
+ {
+ "epoch": 0.261073426901316,
+ "grad_norm": 0.6708115935325623,
+ "learning_rate": 0.00019628065402591845,
+ "loss": 1.5277,
+ "step": 261
+ },
+ {
+ "epoch": 0.2620737082304398,
+ "grad_norm": 0.5480003952980042,
+ "learning_rate": 0.00019625227840001225,
+ "loss": 1.556,
+ "step": 262
+ },
+ {
+ "epoch": 0.2630739895595636,
+ "grad_norm": 0.595191478729248,
+ "learning_rate": 0.0001962237970102593,
+ "loss": 1.3514,
+ "step": 263
+ },
+ {
+ "epoch": 0.26407427088868746,
+ "grad_norm": 0.7332099080085754,
+ "learning_rate": 0.0001961952098879555,
+ "loss": 1.5394,
+ "step": 264
+ },
+ {
+ "epoch": 0.26507455221781123,
+ "grad_norm": 0.596319317817688,
+ "learning_rate": 0.00019616651706451287,
+ "loss": 1.3828,
+ "step": 265
+ },
+ {
+ "epoch": 0.26607483354693506,
+ "grad_norm": 0.5998026132583618,
+ "learning_rate": 0.0001961377185714597,
+ "loss": 1.4479,
+ "step": 266
+ },
+ {
+ "epoch": 0.2670751148760589,
+ "grad_norm": 0.6220220923423767,
+ "learning_rate": 0.0001961088144404403,
+ "loss": 1.5121,
+ "step": 267
+ },
+ {
+ "epoch": 0.2680753962051827,
+ "grad_norm": 0.5865943431854248,
+ "learning_rate": 0.00019607980470321505,
+ "loss": 1.6747,
+ "step": 268
+ },
+ {
+ "epoch": 0.2690756775343065,
+ "grad_norm": 0.5790852904319763,
+ "learning_rate": 0.00019605068939166045,
+ "loss": 1.3798,
+ "step": 269
+ },
+ {
+ "epoch": 0.27007595886343033,
+ "grad_norm": 0.6157498955726624,
+ "learning_rate": 0.00019602146853776894,
+ "loss": 1.6799,
+ "step": 270
+ },
+ {
+ "epoch": 0.27107624019255416,
+ "grad_norm": 0.6214422583580017,
+ "learning_rate": 0.000195992142173649,
+ "loss": 1.4782,
+ "step": 271
+ },
+ {
+ "epoch": 0.272076521521678,
+ "grad_norm": 0.6460129618644714,
+ "learning_rate": 0.0001959627103315249,
+ "loss": 1.4874,
+ "step": 272
+ },
+ {
+ "epoch": 0.27307680285080177,
+ "grad_norm": 0.5928930640220642,
+ "learning_rate": 0.00019593317304373705,
+ "loss": 1.4557,
+ "step": 273
+ },
+ {
+ "epoch": 0.2740770841799256,
+ "grad_norm": 0.5123687982559204,
+ "learning_rate": 0.00019590353034274144,
+ "loss": 1.445,
+ "step": 274
+ },
+ {
+ "epoch": 0.27507736550904943,
+ "grad_norm": 0.607455313205719,
+ "learning_rate": 0.00019587378226111014,
+ "loss": 1.4468,
+ "step": 275
+ },
+ {
+ "epoch": 0.27607764683817326,
+ "grad_norm": 0.6108120083808899,
+ "learning_rate": 0.00019584392883153088,
+ "loss": 1.3834,
+ "step": 276
+ },
+ {
+ "epoch": 0.27707792816729704,
+ "grad_norm": 0.680404543876648,
+ "learning_rate": 0.00019581397008680717,
+ "loss": 1.5094,
+ "step": 277
+ },
+ {
+ "epoch": 0.27807820949642087,
+ "grad_norm": 0.6419563889503479,
+ "learning_rate": 0.00019578390605985826,
+ "loss": 1.6933,
+ "step": 278
+ },
+ {
+ "epoch": 0.2790784908255447,
+ "grad_norm": 0.5788853764533997,
+ "learning_rate": 0.00019575373678371909,
+ "loss": 1.4754,
+ "step": 279
+ },
+ {
+ "epoch": 0.2800787721546685,
+ "grad_norm": 0.5943770408630371,
+ "learning_rate": 0.00019572346229154025,
+ "loss": 1.2949,
+ "step": 280
+ },
+ {
+ "epoch": 0.2810790534837923,
+ "grad_norm": 0.5997135043144226,
+ "learning_rate": 0.00019569308261658787,
+ "loss": 1.5365,
+ "step": 281
+ },
+ {
+ "epoch": 0.28207933481291614,
+ "grad_norm": 0.692401647567749,
+ "learning_rate": 0.00019566259779224378,
+ "loss": 1.4946,
+ "step": 282
+ },
+ {
+ "epoch": 0.28307961614203997,
+ "grad_norm": 0.5856708884239197,
+ "learning_rate": 0.00019563200785200526,
+ "loss": 1.426,
+ "step": 283
+ },
+ {
+ "epoch": 0.28407989747116374,
+ "grad_norm": 1.2516822814941406,
+ "learning_rate": 0.00019560131282948516,
+ "loss": 1.5119,
+ "step": 284
+ },
+ {
+ "epoch": 0.2850801788002876,
+ "grad_norm": 0.6360501050949097,
+ "learning_rate": 0.0001955705127584117,
+ "loss": 1.3916,
+ "step": 285
+ },
+ {
+ "epoch": 0.2860804601294114,
+ "grad_norm": 0.6822036504745483,
+ "learning_rate": 0.00019553960767262863,
+ "loss": 1.5565,
+ "step": 286
+ },
+ {
+ "epoch": 0.28708074145853524,
+ "grad_norm": 0.6973714828491211,
+ "learning_rate": 0.00019550859760609503,
+ "loss": 1.5559,
+ "step": 287
+ },
+ {
+ "epoch": 0.288081022787659,
+ "grad_norm": 0.6595618724822998,
+ "learning_rate": 0.00019547748259288536,
+ "loss": 1.5824,
+ "step": 288
+ },
+ {
+ "epoch": 0.28908130411678284,
+ "grad_norm": 0.5625808238983154,
+ "learning_rate": 0.0001954462626671894,
+ "loss": 1.2669,
+ "step": 289
+ },
+ {
+ "epoch": 0.2900815854459067,
+ "grad_norm": 0.6318663358688354,
+ "learning_rate": 0.0001954149378633122,
+ "loss": 1.3896,
+ "step": 290
+ },
+ {
+ "epoch": 0.29108186677503045,
+ "grad_norm": 0.6655906438827515,
+ "learning_rate": 0.00019538350821567404,
+ "loss": 1.3889,
+ "step": 291
+ },
+ {
+ "epoch": 0.2920821481041543,
+ "grad_norm": 0.5947337746620178,
+ "learning_rate": 0.00019535197375881045,
+ "loss": 1.6112,
+ "step": 292
+ },
+ {
+ "epoch": 0.2930824294332781,
+ "grad_norm": 0.6139295101165771,
+ "learning_rate": 0.00019532033452737205,
+ "loss": 1.5185,
+ "step": 293
+ },
+ {
+ "epoch": 0.29408271076240194,
+ "grad_norm": 0.579953670501709,
+ "learning_rate": 0.00019528859055612468,
+ "loss": 1.3874,
+ "step": 294
+ },
+ {
+ "epoch": 0.2950829920915257,
+ "grad_norm": 0.6101506352424622,
+ "learning_rate": 0.0001952567418799492,
+ "loss": 1.5965,
+ "step": 295
+ },
+ {
+ "epoch": 0.29608327342064955,
+ "grad_norm": 0.6393965482711792,
+ "learning_rate": 0.00019522478853384155,
+ "loss": 1.4124,
+ "step": 296
+ },
+ {
+ "epoch": 0.2970835547497734,
+ "grad_norm": 0.6147856712341309,
+ "learning_rate": 0.00019519273055291266,
+ "loss": 1.3776,
+ "step": 297
+ },
+ {
+ "epoch": 0.2980838360788972,
+ "grad_norm": 0.6056416630744934,
+ "learning_rate": 0.00019516056797238846,
+ "loss": 1.4453,
+ "step": 298
+ },
+ {
+ "epoch": 0.299084117408021,
+ "grad_norm": 0.6705831289291382,
+ "learning_rate": 0.00019512830082760987,
+ "loss": 1.3248,
+ "step": 299
+ },
+ {
+ "epoch": 0.3000843987371448,
+ "grad_norm": 0.6664314866065979,
+ "learning_rate": 0.00019509592915403255,
+ "loss": 1.5865,
+ "step": 300
+ },
+ {
+ "epoch": 0.30108468006626865,
+ "grad_norm": 0.5325604677200317,
+ "learning_rate": 0.00019506345298722717,
+ "loss": 1.0646,
+ "step": 301
+ },
+ {
+ "epoch": 0.3020849613953925,
+ "grad_norm": 0.589242160320282,
+ "learning_rate": 0.00019503087236287913,
+ "loss": 1.2297,
+ "step": 302
+ },
+ {
+ "epoch": 0.30308524272451626,
+ "grad_norm": 0.5677699446678162,
+ "learning_rate": 0.00019499818731678873,
+ "loss": 1.3961,
+ "step": 303
+ },
+ {
+ "epoch": 0.3040855240536401,
+ "grad_norm": 0.5676394701004028,
+ "learning_rate": 0.00019496539788487082,
+ "loss": 1.3276,
+ "step": 304
+ },
+ {
+ "epoch": 0.3050858053827639,
+ "grad_norm": 0.7280861139297485,
+ "learning_rate": 0.0001949325041031551,
+ "loss": 1.6731,
+ "step": 305
+ },
+ {
+ "epoch": 0.3060860867118877,
+ "grad_norm": 0.690636396408081,
+ "learning_rate": 0.0001948995060077859,
+ "loss": 1.5443,
+ "step": 306
+ },
+ {
+ "epoch": 0.3070863680410115,
+ "grad_norm": 0.611426055431366,
+ "learning_rate": 0.0001948664036350221,
+ "loss": 1.5827,
+ "step": 307
+ },
+ {
+ "epoch": 0.30808664937013536,
+ "grad_norm": 0.7112497091293335,
+ "learning_rate": 0.00019483319702123732,
+ "loss": 1.5401,
+ "step": 308
+ },
+ {
+ "epoch": 0.3090869306992592,
+ "grad_norm": 0.6598275303840637,
+ "learning_rate": 0.00019479988620291956,
+ "loss": 1.6432,
+ "step": 309
+ },
+ {
+ "epoch": 0.31008721202838296,
+ "grad_norm": 0.5019932985305786,
+ "learning_rate": 0.00019476647121667137,
+ "loss": 1.2561,
+ "step": 310
+ },
+ {
+ "epoch": 0.3110874933575068,
+ "grad_norm": 0.7777897715568542,
+ "learning_rate": 0.00019473295209920983,
+ "loss": 1.6118,
+ "step": 311
+ },
+ {
+ "epoch": 0.3120877746866306,
+ "grad_norm": 0.6028640866279602,
+ "learning_rate": 0.00019469932888736632,
+ "loss": 1.4682,
+ "step": 312
+ },
+ {
+ "epoch": 0.31308805601575446,
+ "grad_norm": 0.554381251335144,
+ "learning_rate": 0.00019466560161808674,
+ "loss": 1.4179,
+ "step": 313
+ },
+ {
+ "epoch": 0.31408833734487823,
+ "grad_norm": 0.6212736368179321,
+ "learning_rate": 0.00019463177032843124,
+ "loss": 1.4327,
+ "step": 314
+ },
+ {
+ "epoch": 0.31508861867400206,
+ "grad_norm": 0.6829814910888672,
+ "learning_rate": 0.00019459783505557424,
+ "loss": 1.4455,
+ "step": 315
+ },
+ {
+ "epoch": 0.3160889000031259,
+ "grad_norm": 0.5808065533638,
+ "learning_rate": 0.00019456379583680452,
+ "loss": 1.3583,
+ "step": 316
+ },
+ {
+ "epoch": 0.31708918133224967,
+ "grad_norm": 0.6354159712791443,
+ "learning_rate": 0.000194529652709525,
+ "loss": 1.6916,
+ "step": 317
+ },
+ {
+ "epoch": 0.3180894626613735,
+ "grad_norm": 0.6299159526824951,
+ "learning_rate": 0.00019449540571125286,
+ "loss": 1.47,
+ "step": 318
+ },
+ {
+ "epoch": 0.31908974399049733,
+ "grad_norm": 0.6222877502441406,
+ "learning_rate": 0.00019446105487961926,
+ "loss": 1.4137,
+ "step": 319
+ },
+ {
+ "epoch": 0.32009002531962116,
+ "grad_norm": 0.5995916724205017,
+ "learning_rate": 0.0001944266002523696,
+ "loss": 1.3679,
+ "step": 320
+ },
+ {
+ "epoch": 0.32109030664874494,
+ "grad_norm": 0.599814236164093,
+ "learning_rate": 0.0001943920418673633,
+ "loss": 1.4075,
+ "step": 321
+ },
+ {
+ "epoch": 0.32209058797786877,
+ "grad_norm": 0.5409269332885742,
+ "learning_rate": 0.00019435737976257377,
+ "loss": 1.4289,
+ "step": 322
+ },
+ {
+ "epoch": 0.3230908693069926,
+ "grad_norm": 0.5298951864242554,
+ "learning_rate": 0.00019432261397608834,
+ "loss": 1.2834,
+ "step": 323
+ },
+ {
+ "epoch": 0.32409115063611643,
+ "grad_norm": 0.7196112871170044,
+ "learning_rate": 0.00019428774454610843,
+ "loss": 1.4845,
+ "step": 324
+ },
+ {
+ "epoch": 0.3250914319652402,
+ "grad_norm": 0.5605450868606567,
+ "learning_rate": 0.00019425277151094913,
+ "loss": 1.4575,
+ "step": 325
+ },
+ {
+ "epoch": 0.32609171329436404,
+ "grad_norm": 0.573080837726593,
+ "learning_rate": 0.00019421769490903957,
+ "loss": 1.5757,
+ "step": 326
+ },
+ {
+ "epoch": 0.32709199462348787,
+ "grad_norm": 0.5017902851104736,
+ "learning_rate": 0.0001941825147789225,
+ "loss": 1.5794,
+ "step": 327
+ },
+ {
+ "epoch": 0.32809227595261165,
+ "grad_norm": 0.643267810344696,
+ "learning_rate": 0.00019414723115925456,
+ "loss": 1.4903,
+ "step": 328
+ },
+ {
+ "epoch": 0.3290925572817355,
+ "grad_norm": 0.6522070169448853,
+ "learning_rate": 0.0001941118440888061,
+ "loss": 1.5907,
+ "step": 329
+ },
+ {
+ "epoch": 0.3300928386108593,
+ "grad_norm": 0.6496105790138245,
+ "learning_rate": 0.0001940763536064611,
+ "loss": 1.4225,
+ "step": 330
+ },
+ {
+ "epoch": 0.33109311993998314,
+ "grad_norm": 0.6011468768119812,
+ "learning_rate": 0.00019404075975121716,
+ "loss": 1.5022,
+ "step": 331
+ },
+ {
+ "epoch": 0.3320934012691069,
+ "grad_norm": 0.6327878832817078,
+ "learning_rate": 0.0001940050625621855,
+ "loss": 1.468,
+ "step": 332
+ },
+ {
+ "epoch": 0.33309368259823074,
+ "grad_norm": 0.6187490820884705,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 1.5183,
+ "step": 333
+ },
+ {
+ "epoch": 0.3340939639273546,
+ "grad_norm": 0.7625093460083008,
+ "learning_rate": 0.0001939333583397715,
+ "loss": 1.4813,
+ "step": 334
+ },
+ {
+ "epoch": 0.3350942452564784,
+ "grad_norm": 0.5286359190940857,
+ "learning_rate": 0.00019389735138517915,
+ "loss": 1.3674,
+ "step": 335
+ },
+ {
+ "epoch": 0.3360945265856022,
+ "grad_norm": 0.5798503160476685,
+ "learning_rate": 0.00019386124125437895,
+ "loss": 1.3016,
+ "step": 336
+ },
+ {
+ "epoch": 0.337094807914726,
+ "grad_norm": 0.48794126510620117,
+ "learning_rate": 0.00019382502798704935,
+ "loss": 1.3642,
+ "step": 337
+ },
+ {
+ "epoch": 0.33809508924384984,
+ "grad_norm": 0.7394312620162964,
+ "learning_rate": 0.00019378871162298227,
+ "loss": 1.327,
+ "step": 338
+ },
+ {
+ "epoch": 0.3390953705729737,
+ "grad_norm": 0.5598319172859192,
+ "learning_rate": 0.00019375229220208276,
+ "loss": 1.4247,
+ "step": 339
+ },
+ {
+ "epoch": 0.34009565190209745,
+ "grad_norm": 0.6099628806114197,
+ "learning_rate": 0.00019371576976436917,
+ "loss": 1.4906,
+ "step": 340
+ },
+ {
+ "epoch": 0.3410959332312213,
+ "grad_norm": 0.6749781370162964,
+ "learning_rate": 0.00019367914434997312,
+ "loss": 1.367,
+ "step": 341
+ },
+ {
+ "epoch": 0.3420962145603451,
+ "grad_norm": 0.7721238136291504,
+ "learning_rate": 0.00019364241599913924,
+ "loss": 1.4464,
+ "step": 342
+ },
+ {
+ "epoch": 0.3430964958894689,
+ "grad_norm": 0.5762369632720947,
+ "learning_rate": 0.0001936055847522254,
+ "loss": 1.409,
+ "step": 343
+ },
+ {
+ "epoch": 0.3440967772185927,
+ "grad_norm": 0.6960498690605164,
+ "learning_rate": 0.00019356865064970244,
+ "loss": 1.3907,
+ "step": 344
+ },
+ {
+ "epoch": 0.34509705854771655,
+ "grad_norm": 0.5805984735488892,
+ "learning_rate": 0.0001935316137321543,
+ "loss": 1.4539,
+ "step": 345
+ },
+ {
+ "epoch": 0.3460973398768404,
+ "grad_norm": 0.5686045289039612,
+ "learning_rate": 0.00019349447404027782,
+ "loss": 1.4493,
+ "step": 346
+ },
+ {
+ "epoch": 0.34709762120596416,
+ "grad_norm": 0.5448501706123352,
+ "learning_rate": 0.00019345723161488283,
+ "loss": 1.5633,
+ "step": 347
+ },
+ {
+ "epoch": 0.348097902535088,
+ "grad_norm": 0.6388784050941467,
+ "learning_rate": 0.000193419886496892,
+ "loss": 1.7179,
+ "step": 348
+ },
+ {
+ "epoch": 0.3490981838642118,
+ "grad_norm": 0.5240457653999329,
+ "learning_rate": 0.00019338243872734086,
+ "loss": 1.4411,
+ "step": 349
+ },
+ {
+ "epoch": 0.35009846519333565,
+ "grad_norm": 0.5460641384124756,
+ "learning_rate": 0.00019334488834737775,
+ "loss": 1.361,
+ "step": 350
+ },
+ {
+ "epoch": 0.3510987465224594,
+ "grad_norm": 0.5495695471763611,
+ "learning_rate": 0.00019330723539826375,
+ "loss": 1.5891,
+ "step": 351
+ },
+ {
+ "epoch": 0.35209902785158326,
+ "grad_norm": 0.5618153214454651,
+ "learning_rate": 0.00019326947992137262,
+ "loss": 1.3084,
+ "step": 352
+ },
+ {
+ "epoch": 0.3530993091807071,
+ "grad_norm": 0.5603707432746887,
+ "learning_rate": 0.00019323162195819082,
+ "loss": 1.5732,
+ "step": 353
+ },
+ {
+ "epoch": 0.35409959050983086,
+ "grad_norm": 0.5732563138008118,
+ "learning_rate": 0.0001931936615503174,
+ "loss": 1.5045,
+ "step": 354
+ },
+ {
+ "epoch": 0.3550998718389547,
+ "grad_norm": 0.5997583866119385,
+ "learning_rate": 0.000193155598739464,
+ "loss": 1.4175,
+ "step": 355
+ },
+ {
+ "epoch": 0.3561001531680785,
+ "grad_norm": 0.5769765377044678,
+ "learning_rate": 0.0001931174335674547,
+ "loss": 1.4834,
+ "step": 356
+ },
+ {
+ "epoch": 0.35710043449720236,
+ "grad_norm": 0.5902683138847351,
+ "learning_rate": 0.0001930791660762262,
+ "loss": 1.4664,
+ "step": 357
+ },
+ {
+ "epoch": 0.35810071582632613,
+ "grad_norm": 0.6354758143424988,
+ "learning_rate": 0.00019304079630782752,
+ "loss": 1.3891,
+ "step": 358
+ },
+ {
+ "epoch": 0.35910099715544996,
+ "grad_norm": 0.6018317341804504,
+ "learning_rate": 0.0001930023243044201,
+ "loss": 1.4514,
+ "step": 359
+ },
+ {
+ "epoch": 0.3601012784845738,
+ "grad_norm": 0.5409123301506042,
+ "learning_rate": 0.00019296375010827773,
+ "loss": 1.4708,
+ "step": 360
+ },
+ {
+ "epoch": 0.3611015598136976,
+ "grad_norm": 0.5457523465156555,
+ "learning_rate": 0.00019292507376178643,
+ "loss": 1.4988,
+ "step": 361
+ },
+ {
+ "epoch": 0.3621018411428214,
+ "grad_norm": 0.626768946647644,
+ "learning_rate": 0.00019288629530744454,
+ "loss": 1.5722,
+ "step": 362
+ },
+ {
+ "epoch": 0.36310212247194523,
+ "grad_norm": 0.566554069519043,
+ "learning_rate": 0.0001928474147878626,
+ "loss": 1.2135,
+ "step": 363
+ },
+ {
+ "epoch": 0.36410240380106906,
+ "grad_norm": 0.7327786684036255,
+ "learning_rate": 0.0001928084322457632,
+ "loss": 1.5245,
+ "step": 364
+ },
+ {
+ "epoch": 0.3651026851301929,
+ "grad_norm": 0.5205698609352112,
+ "learning_rate": 0.00019276934772398114,
+ "loss": 1.2068,
+ "step": 365
+ },
+ {
+ "epoch": 0.36610296645931667,
+ "grad_norm": 1.0956753492355347,
+ "learning_rate": 0.00019273016126546323,
+ "loss": 1.5044,
+ "step": 366
+ },
+ {
+ "epoch": 0.3671032477884405,
+ "grad_norm": 0.6484043598175049,
+ "learning_rate": 0.00019269087291326833,
+ "loss": 1.6369,
+ "step": 367
+ },
+ {
+ "epoch": 0.36810352911756433,
+ "grad_norm": 0.6363429427146912,
+ "learning_rate": 0.00019265148271056722,
+ "loss": 1.4338,
+ "step": 368
+ },
+ {
+ "epoch": 0.3691038104466881,
+ "grad_norm": 0.6295244693756104,
+ "learning_rate": 0.0001926119907006426,
+ "loss": 1.4701,
+ "step": 369
+ },
+ {
+ "epoch": 0.37010409177581194,
+ "grad_norm": 0.6013259887695312,
+ "learning_rate": 0.00019257239692688907,
+ "loss": 1.7629,
+ "step": 370
+ },
+ {
+ "epoch": 0.37110437310493577,
+ "grad_norm": 0.6949493885040283,
+ "learning_rate": 0.00019253270143281296,
+ "loss": 1.6713,
+ "step": 371
+ },
+ {
+ "epoch": 0.3721046544340596,
+ "grad_norm": 0.6933801174163818,
+ "learning_rate": 0.00019249290426203252,
+ "loss": 1.6131,
+ "step": 372
+ },
+ {
+ "epoch": 0.3731049357631834,
+ "grad_norm": 0.5847527384757996,
+ "learning_rate": 0.0001924530054582776,
+ "loss": 1.3968,
+ "step": 373
+ },
+ {
+ "epoch": 0.3741052170923072,
+ "grad_norm": 0.6053057312965393,
+ "learning_rate": 0.0001924130050653898,
+ "loss": 1.3311,
+ "step": 374
+ },
+ {
+ "epoch": 0.37510549842143104,
+ "grad_norm": 0.5513793230056763,
+ "learning_rate": 0.00019237290312732226,
+ "loss": 1.5063,
+ "step": 375
+ },
+ {
+ "epoch": 0.37610577975055487,
+ "grad_norm": 0.5859197378158569,
+ "learning_rate": 0.00019233269968813984,
+ "loss": 1.3556,
+ "step": 376
+ },
+ {
+ "epoch": 0.37710606107967864,
+ "grad_norm": 0.5623495578765869,
+ "learning_rate": 0.00019229239479201876,
+ "loss": 1.3859,
+ "step": 377
+ },
+ {
+ "epoch": 0.3781063424088025,
+ "grad_norm": 0.602118968963623,
+ "learning_rate": 0.0001922519884832469,
+ "loss": 1.334,
+ "step": 378
+ },
+ {
+ "epoch": 0.3791066237379263,
+ "grad_norm": 0.5212380886077881,
+ "learning_rate": 0.0001922114808062234,
+ "loss": 1.401,
+ "step": 379
+ },
+ {
+ "epoch": 0.3801069050670501,
+ "grad_norm": 0.4969455599784851,
+ "learning_rate": 0.00019217087180545893,
+ "loss": 1.2292,
+ "step": 380
+ },
+ {
+ "epoch": 0.3811071863961739,
+ "grad_norm": 0.578629732131958,
+ "learning_rate": 0.0001921301615255754,
+ "loss": 1.5015,
+ "step": 381
+ },
+ {
+ "epoch": 0.38210746772529774,
+ "grad_norm": 0.593053936958313,
+ "learning_rate": 0.0001920893500113061,
+ "loss": 1.302,
+ "step": 382
+ },
+ {
+ "epoch": 0.3831077490544216,
+ "grad_norm": 0.5832563638687134,
+ "learning_rate": 0.00019204843730749547,
+ "loss": 1.3695,
+ "step": 383
+ },
+ {
+ "epoch": 0.38410803038354535,
+ "grad_norm": 0.5608510375022888,
+ "learning_rate": 0.00019200742345909915,
+ "loss": 1.3792,
+ "step": 384
+ },
+ {
+ "epoch": 0.3851083117126692,
+ "grad_norm": 0.5337334275245667,
+ "learning_rate": 0.00019196630851118398,
+ "loss": 1.4163,
+ "step": 385
+ },
+ {
+ "epoch": 0.386108593041793,
+ "grad_norm": 0.5460125803947449,
+ "learning_rate": 0.0001919250925089278,
+ "loss": 1.2439,
+ "step": 386
+ },
+ {
+ "epoch": 0.38710887437091684,
+ "grad_norm": 0.6217851638793945,
+ "learning_rate": 0.00019188377549761963,
+ "loss": 1.6428,
+ "step": 387
+ },
+ {
+ "epoch": 0.3881091557000406,
+ "grad_norm": 0.7154502868652344,
+ "learning_rate": 0.00019184235752265928,
+ "loss": 1.3468,
+ "step": 388
+ },
+ {
+ "epoch": 0.38910943702916445,
+ "grad_norm": 0.5044635534286499,
+ "learning_rate": 0.00019180083862955772,
+ "loss": 1.1877,
+ "step": 389
+ },
+ {
+ "epoch": 0.3901097183582883,
+ "grad_norm": 0.5755971074104309,
+ "learning_rate": 0.00019175921886393666,
+ "loss": 1.3475,
+ "step": 390
+ },
+ {
+ "epoch": 0.39110999968741206,
+ "grad_norm": 0.6121137738227844,
+ "learning_rate": 0.00019171749827152869,
+ "loss": 1.4342,
+ "step": 391
+ },
+ {
+ "epoch": 0.3921102810165359,
+ "grad_norm": 0.5615536570549011,
+ "learning_rate": 0.0001916756768981772,
+ "loss": 1.5471,
+ "step": 392
+ },
+ {
+ "epoch": 0.3931105623456597,
+ "grad_norm": 0.6527026295661926,
+ "learning_rate": 0.00019163375478983632,
+ "loss": 1.6363,
+ "step": 393
+ },
+ {
+ "epoch": 0.39411084367478355,
+ "grad_norm": 0.6465044617652893,
+ "learning_rate": 0.00019159173199257085,
+ "loss": 1.3823,
+ "step": 394
+ },
+ {
+ "epoch": 0.3951111250039073,
+ "grad_norm": 0.5620000958442688,
+ "learning_rate": 0.00019154960855255628,
+ "loss": 1.5418,
+ "step": 395
+ },
+ {
+ "epoch": 0.39611140633303116,
+ "grad_norm": 0.7090588808059692,
+ "learning_rate": 0.0001915073845160786,
+ "loss": 1.4593,
+ "step": 396
+ },
+ {
+ "epoch": 0.397111687662155,
+ "grad_norm": 0.6644489169120789,
+ "learning_rate": 0.00019146505992953446,
+ "loss": 1.4236,
+ "step": 397
+ },
+ {
+ "epoch": 0.3981119689912788,
+ "grad_norm": 0.6038135886192322,
+ "learning_rate": 0.00019142263483943085,
+ "loss": 1.1805,
+ "step": 398
+ },
+ {
+ "epoch": 0.3991122503204026,
+ "grad_norm": 0.6746726036071777,
+ "learning_rate": 0.00019138010929238534,
+ "loss": 1.5264,
+ "step": 399
+ },
+ {
+ "epoch": 0.4001125316495264,
+ "grad_norm": 0.5871374607086182,
+ "learning_rate": 0.00019133748333512575,
+ "loss": 1.3709,
+ "step": 400
+ },
+ {
+ "epoch": 0.40111281297865026,
+ "grad_norm": 0.5743412375450134,
+ "learning_rate": 0.00019129475701449035,
+ "loss": 1.4677,
+ "step": 401
+ },
+ {
+ "epoch": 0.4021130943077741,
+ "grad_norm": 0.6184396743774414,
+ "learning_rate": 0.0001912519303774276,
+ "loss": 1.4228,
+ "step": 402
+ },
+ {
+ "epoch": 0.40311337563689786,
+ "grad_norm": 0.5872434973716736,
+ "learning_rate": 0.0001912090034709963,
+ "loss": 1.3495,
+ "step": 403
+ },
+ {
+ "epoch": 0.4041136569660217,
+ "grad_norm": 0.6500155925750732,
+ "learning_rate": 0.00019116597634236525,
+ "loss": 1.4315,
+ "step": 404
+ },
+ {
+ "epoch": 0.4051139382951455,
+ "grad_norm": 0.5240740180015564,
+ "learning_rate": 0.0001911228490388136,
+ "loss": 1.4954,
+ "step": 405
+ },
+ {
+ "epoch": 0.4061142196242693,
+ "grad_norm": 0.5531806945800781,
+ "learning_rate": 0.00019107962160773035,
+ "loss": 1.3949,
+ "step": 406
+ },
+ {
+ "epoch": 0.40711450095339313,
+ "grad_norm": 0.5266262888908386,
+ "learning_rate": 0.0001910362940966147,
+ "loss": 1.2859,
+ "step": 407
+ },
+ {
+ "epoch": 0.40811478228251696,
+ "grad_norm": 0.5734869241714478,
+ "learning_rate": 0.00019099286655307568,
+ "loss": 1.2451,
+ "step": 408
+ },
+ {
+ "epoch": 0.4091150636116408,
+ "grad_norm": 0.5922874212265015,
+ "learning_rate": 0.0001909493390248324,
+ "loss": 1.5429,
+ "step": 409
+ },
+ {
+ "epoch": 0.41011534494076457,
+ "grad_norm": 0.542540431022644,
+ "learning_rate": 0.00019090571155971366,
+ "loss": 1.4138,
+ "step": 410
+ },
+ {
+ "epoch": 0.4111156262698884,
+ "grad_norm": 0.57356196641922,
+ "learning_rate": 0.00019086198420565823,
+ "loss": 1.2592,
+ "step": 411
+ },
+ {
+ "epoch": 0.41211590759901223,
+ "grad_norm": 0.6042733192443848,
+ "learning_rate": 0.00019081815701071445,
+ "loss": 1.5524,
+ "step": 412
+ },
+ {
+ "epoch": 0.41311618892813606,
+ "grad_norm": 0.46550241112709045,
+ "learning_rate": 0.0001907742300230406,
+ "loss": 1.308,
+ "step": 413
+ },
+ {
+ "epoch": 0.41411647025725984,
+ "grad_norm": 0.6283137798309326,
+ "learning_rate": 0.00019073020329090444,
+ "loss": 1.4753,
+ "step": 414
+ },
+ {
+ "epoch": 0.41511675158638367,
+ "grad_norm": 0.5254876613616943,
+ "learning_rate": 0.0001906860768626834,
+ "loss": 1.2157,
+ "step": 415
+ },
+ {
+ "epoch": 0.4161170329155075,
+ "grad_norm": 0.59089195728302,
+ "learning_rate": 0.00019064185078686443,
+ "loss": 1.2684,
+ "step": 416
+ },
+ {
+ "epoch": 0.4171173142446313,
+ "grad_norm": 0.7129126787185669,
+ "learning_rate": 0.000190597525112044,
+ "loss": 1.3974,
+ "step": 417
+ },
+ {
+ "epoch": 0.4181175955737551,
+ "grad_norm": 0.607305109500885,
+ "learning_rate": 0.000190553099886928,
+ "loss": 1.4312,
+ "step": 418
+ },
+ {
+ "epoch": 0.41911787690287894,
+ "grad_norm": 0.49921515583992004,
+ "learning_rate": 0.00019050857516033173,
+ "loss": 1.3469,
+ "step": 419
+ },
+ {
+ "epoch": 0.42011815823200277,
+ "grad_norm": 0.6167325377464294,
+ "learning_rate": 0.00019046395098117983,
+ "loss": 1.4723,
+ "step": 420
+ },
+ {
+ "epoch": 0.42111843956112655,
+ "grad_norm": 0.6144593358039856,
+ "learning_rate": 0.00019041922739850616,
+ "loss": 1.5502,
+ "step": 421
+ },
+ {
+ "epoch": 0.4221187208902504,
+ "grad_norm": 0.61333167552948,
+ "learning_rate": 0.00019037440446145385,
+ "loss": 1.3283,
+ "step": 422
+ },
+ {
+ "epoch": 0.4231190022193742,
+ "grad_norm": 0.5881702303886414,
+ "learning_rate": 0.00019032948221927524,
+ "loss": 1.4206,
+ "step": 423
+ },
+ {
+ "epoch": 0.42411928354849804,
+ "grad_norm": 0.5334322452545166,
+ "learning_rate": 0.00019028446072133175,
+ "loss": 1.4603,
+ "step": 424
+ },
+ {
+ "epoch": 0.4251195648776218,
+ "grad_norm": 0.5730605721473694,
+ "learning_rate": 0.00019023934001709383,
+ "loss": 1.4375,
+ "step": 425
+ },
+ {
+ "epoch": 0.42611984620674564,
+ "grad_norm": 0.6227820515632629,
+ "learning_rate": 0.00019019412015614098,
+ "loss": 1.4888,
+ "step": 426
+ },
+ {
+ "epoch": 0.4271201275358695,
+ "grad_norm": 0.5811313390731812,
+ "learning_rate": 0.00019014880118816164,
+ "loss": 1.3492,
+ "step": 427
+ },
+ {
+ "epoch": 0.4281204088649933,
+ "grad_norm": 0.5685800313949585,
+ "learning_rate": 0.0001901033831629532,
+ "loss": 1.5052,
+ "step": 428
+ },
+ {
+ "epoch": 0.4291206901941171,
+ "grad_norm": 0.5961394309997559,
+ "learning_rate": 0.00019005786613042185,
+ "loss": 1.3324,
+ "step": 429
+ },
+ {
+ "epoch": 0.4301209715232409,
+ "grad_norm": 0.5845314860343933,
+ "learning_rate": 0.00019001225014058255,
+ "loss": 1.5733,
+ "step": 430
+ },
+ {
+ "epoch": 0.43112125285236474,
+ "grad_norm": 0.5400176048278809,
+ "learning_rate": 0.00018996653524355902,
+ "loss": 1.3973,
+ "step": 431
+ },
+ {
+ "epoch": 0.4321215341814885,
+ "grad_norm": 0.5462201833724976,
+ "learning_rate": 0.00018992072148958368,
+ "loss": 1.2167,
+ "step": 432
+ },
+ {
+ "epoch": 0.43312181551061235,
+ "grad_norm": 0.6200360059738159,
+ "learning_rate": 0.00018987480892899758,
+ "loss": 1.5596,
+ "step": 433
+ },
+ {
+ "epoch": 0.4341220968397362,
+ "grad_norm": 0.5230718851089478,
+ "learning_rate": 0.00018982879761225027,
+ "loss": 1.3661,
+ "step": 434
+ },
+ {
+ "epoch": 0.43512237816886,
+ "grad_norm": 0.5868643522262573,
+ "learning_rate": 0.00018978268758989991,
+ "loss": 1.4792,
+ "step": 435
+ },
+ {
+ "epoch": 0.4361226594979838,
+ "grad_norm": 0.580892026424408,
+ "learning_rate": 0.00018973647891261307,
+ "loss": 1.3275,
+ "step": 436
+ },
+ {
+ "epoch": 0.4371229408271076,
+ "grad_norm": 0.5903263688087463,
+ "learning_rate": 0.00018969017163116472,
+ "loss": 1.4721,
+ "step": 437
+ },
+ {
+ "epoch": 0.43812322215623145,
+ "grad_norm": 0.5108968019485474,
+ "learning_rate": 0.0001896437657964382,
+ "loss": 1.3785,
+ "step": 438
+ },
+ {
+ "epoch": 0.4391235034853553,
+ "grad_norm": 0.6707500219345093,
+ "learning_rate": 0.00018959726145942508,
+ "loss": 1.5033,
+ "step": 439
+ },
+ {
+ "epoch": 0.44012378481447906,
+ "grad_norm": 0.5793184638023376,
+ "learning_rate": 0.00018955065867122528,
+ "loss": 1.3629,
+ "step": 440
+ },
+ {
+ "epoch": 0.4411240661436029,
+ "grad_norm": 0.5549041628837585,
+ "learning_rate": 0.00018950395748304678,
+ "loss": 1.5557,
+ "step": 441
+ },
+ {
+ "epoch": 0.4421243474727267,
+ "grad_norm": 0.5406919121742249,
+ "learning_rate": 0.0001894571579462058,
+ "loss": 1.4441,
+ "step": 442
+ },
+ {
+ "epoch": 0.4431246288018505,
+ "grad_norm": 0.5131089091300964,
+ "learning_rate": 0.00018941026011212654,
+ "loss": 1.3051,
+ "step": 443
+ },
+ {
+ "epoch": 0.4441249101309743,
+ "grad_norm": 0.601586639881134,
+ "learning_rate": 0.00018936326403234125,
+ "loss": 1.5297,
+ "step": 444
+ },
+ {
+ "epoch": 0.44512519146009816,
+ "grad_norm": 0.5036457180976868,
+ "learning_rate": 0.00018931616975849006,
+ "loss": 1.357,
+ "step": 445
+ },
+ {
+ "epoch": 0.446125472789222,
+ "grad_norm": 0.5471266508102417,
+ "learning_rate": 0.00018926897734232115,
+ "loss": 1.2176,
+ "step": 446
+ },
+ {
+ "epoch": 0.44712575411834576,
+ "grad_norm": 0.6057867407798767,
+ "learning_rate": 0.0001892216868356904,
+ "loss": 1.4763,
+ "step": 447
+ },
+ {
+ "epoch": 0.4481260354474696,
+ "grad_norm": 0.5384593605995178,
+ "learning_rate": 0.0001891742982905615,
+ "loss": 1.513,
+ "step": 448
+ },
+ {
+ "epoch": 0.4491263167765934,
+ "grad_norm": 0.6144880056381226,
+ "learning_rate": 0.00018912681175900598,
+ "loss": 1.5782,
+ "step": 449
+ },
+ {
+ "epoch": 0.45012659810571726,
+ "grad_norm": 0.4838174879550934,
+ "learning_rate": 0.00018907922729320285,
+ "loss": 1.4085,
+ "step": 450
+ },
+ {
+ "epoch": 0.45112687943484103,
+ "grad_norm": 0.6852928400039673,
+ "learning_rate": 0.00018903154494543889,
+ "loss": 1.5989,
+ "step": 451
+ },
+ {
+ "epoch": 0.45212716076396486,
+ "grad_norm": 0.47527411580085754,
+ "learning_rate": 0.00018898376476810834,
+ "loss": 1.3409,
+ "step": 452
+ },
+ {
+ "epoch": 0.4531274420930887,
+ "grad_norm": 0.5665884613990784,
+ "learning_rate": 0.00018893588681371303,
+ "loss": 1.5395,
+ "step": 453
+ },
+ {
+ "epoch": 0.45412772342221247,
+ "grad_norm": 0.5792158246040344,
+ "learning_rate": 0.00018888791113486213,
+ "loss": 1.516,
+ "step": 454
+ },
+ {
+ "epoch": 0.4551280047513363,
+ "grad_norm": 0.5223523378372192,
+ "learning_rate": 0.00018883983778427227,
+ "loss": 1.3678,
+ "step": 455
+ },
+ {
+ "epoch": 0.45612828608046013,
+ "grad_norm": 0.5927590131759644,
+ "learning_rate": 0.0001887916668147673,
+ "loss": 1.3617,
+ "step": 456
+ },
+ {
+ "epoch": 0.45712856740958396,
+ "grad_norm": 0.7266496419906616,
+ "learning_rate": 0.00018874339827927846,
+ "loss": 1.3734,
+ "step": 457
+ },
+ {
+ "epoch": 0.45812884873870774,
+ "grad_norm": 0.6495805978775024,
+ "learning_rate": 0.00018869503223084414,
+ "loss": 1.5282,
+ "step": 458
+ },
+ {
+ "epoch": 0.45912913006783157,
+ "grad_norm": 0.6099816560745239,
+ "learning_rate": 0.00018864656872260985,
+ "loss": 1.4691,
+ "step": 459
+ },
+ {
+ "epoch": 0.4601294113969554,
+ "grad_norm": 0.5208227038383484,
+ "learning_rate": 0.00018859800780782828,
+ "loss": 1.3949,
+ "step": 460
+ },
+ {
+ "epoch": 0.46112969272607923,
+ "grad_norm": 0.5526600480079651,
+ "learning_rate": 0.000188549349539859,
+ "loss": 1.3557,
+ "step": 461
+ },
+ {
+ "epoch": 0.462129974055203,
+ "grad_norm": 0.5537740588188171,
+ "learning_rate": 0.00018850059397216876,
+ "loss": 1.4703,
+ "step": 462
+ },
+ {
+ "epoch": 0.46313025538432684,
+ "grad_norm": 0.5553976893424988,
+ "learning_rate": 0.00018845174115833099,
+ "loss": 1.4356,
+ "step": 463
+ },
+ {
+ "epoch": 0.46413053671345067,
+ "grad_norm": 0.6027779579162598,
+ "learning_rate": 0.0001884027911520262,
+ "loss": 1.4763,
+ "step": 464
+ },
+ {
+ "epoch": 0.4651308180425745,
+ "grad_norm": 0.5559154748916626,
+ "learning_rate": 0.00018835374400704154,
+ "loss": 1.4148,
+ "step": 465
+ },
+ {
+ "epoch": 0.4661310993716983,
+ "grad_norm": 0.6124109029769897,
+ "learning_rate": 0.00018830459977727096,
+ "loss": 1.4468,
+ "step": 466
+ },
+ {
+ "epoch": 0.4671313807008221,
+ "grad_norm": 0.4762580692768097,
+ "learning_rate": 0.0001882553585167151,
+ "loss": 1.3714,
+ "step": 467
+ },
+ {
+ "epoch": 0.46813166202994594,
+ "grad_norm": 0.5793487429618835,
+ "learning_rate": 0.00018820602027948114,
+ "loss": 1.4828,
+ "step": 468
+ },
+ {
+ "epoch": 0.4691319433590697,
+ "grad_norm": 0.55177241563797,
+ "learning_rate": 0.00018815658511978298,
+ "loss": 1.4157,
+ "step": 469
+ },
+ {
+ "epoch": 0.47013222468819355,
+ "grad_norm": 0.5065292716026306,
+ "learning_rate": 0.00018810705309194083,
+ "loss": 1.4519,
+ "step": 470
+ },
+ {
+ "epoch": 0.4711325060173174,
+ "grad_norm": 0.5401413440704346,
+ "learning_rate": 0.00018805742425038145,
+ "loss": 1.4344,
+ "step": 471
+ },
+ {
+ "epoch": 0.4721327873464412,
+ "grad_norm": 0.7173880338668823,
+ "learning_rate": 0.00018800769864963802,
+ "loss": 1.7325,
+ "step": 472
+ },
+ {
+ "epoch": 0.473133068675565,
+ "grad_norm": 0.507682204246521,
+ "learning_rate": 0.00018795787634434994,
+ "loss": 1.37,
+ "step": 473
+ },
+ {
+ "epoch": 0.4741333500046888,
+ "grad_norm": 0.551888644695282,
+ "learning_rate": 0.0001879079573892629,
+ "loss": 1.3695,
+ "step": 474
+ },
+ {
+ "epoch": 0.47513363133381264,
+ "grad_norm": 0.5109260082244873,
+ "learning_rate": 0.00018785794183922883,
+ "loss": 1.4001,
+ "step": 475
+ },
+ {
+ "epoch": 0.4761339126629365,
+ "grad_norm": 0.4565551280975342,
+ "learning_rate": 0.00018780782974920572,
+ "loss": 1.1752,
+ "step": 476
+ },
+ {
+ "epoch": 0.47713419399206025,
+ "grad_norm": 0.5651509761810303,
+ "learning_rate": 0.00018775762117425777,
+ "loss": 1.4291,
+ "step": 477
+ },
+ {
+ "epoch": 0.4781344753211841,
+ "grad_norm": 0.5827792286872864,
+ "learning_rate": 0.0001877073161695551,
+ "loss": 1.3438,
+ "step": 478
+ },
+ {
+ "epoch": 0.4791347566503079,
+ "grad_norm": 0.5719752907752991,
+ "learning_rate": 0.00018765691479037376,
+ "loss": 1.4683,
+ "step": 479
+ },
+ {
+ "epoch": 0.4801350379794317,
+ "grad_norm": 0.5153111815452576,
+ "learning_rate": 0.00018760641709209583,
+ "loss": 1.4392,
+ "step": 480
+ },
+ {
+ "epoch": 0.4811353193085555,
+ "grad_norm": 0.5455904603004456,
+ "learning_rate": 0.0001875558231302091,
+ "loss": 1.1603,
+ "step": 481
+ },
+ {
+ "epoch": 0.48213560063767935,
+ "grad_norm": 0.5857074856758118,
+ "learning_rate": 0.00018750513296030718,
+ "loss": 1.3099,
+ "step": 482
+ },
+ {
+ "epoch": 0.4831358819668032,
+ "grad_norm": 0.6051676273345947,
+ "learning_rate": 0.00018745434663808942,
+ "loss": 1.3587,
+ "step": 483
+ },
+ {
+ "epoch": 0.48413616329592696,
+ "grad_norm": 0.588749885559082,
+ "learning_rate": 0.0001874034642193608,
+ "loss": 1.5277,
+ "step": 484
+ },
+ {
+ "epoch": 0.4851364446250508,
+ "grad_norm": 0.5295410752296448,
+ "learning_rate": 0.0001873524857600319,
+ "loss": 1.2084,
+ "step": 485
+ },
+ {
+ "epoch": 0.4861367259541746,
+ "grad_norm": 0.5313368439674377,
+ "learning_rate": 0.00018730141131611882,
+ "loss": 1.4002,
+ "step": 486
+ },
+ {
+ "epoch": 0.48713700728329845,
+ "grad_norm": 0.5166353583335876,
+ "learning_rate": 0.00018725024094374315,
+ "loss": 1.208,
+ "step": 487
+ },
+ {
+ "epoch": 0.4881372886124222,
+ "grad_norm": 0.5478363037109375,
+ "learning_rate": 0.00018719897469913184,
+ "loss": 1.3236,
+ "step": 488
+ },
+ {
+ "epoch": 0.48913756994154606,
+ "grad_norm": 0.5531913042068481,
+ "learning_rate": 0.00018714761263861728,
+ "loss": 1.4938,
+ "step": 489
+ },
+ {
+ "epoch": 0.4901378512706699,
+ "grad_norm": 0.5334530472755432,
+ "learning_rate": 0.000187096154818637,
+ "loss": 1.4172,
+ "step": 490
+ },
+ {
+ "epoch": 0.4911381325997937,
+ "grad_norm": 0.5667001605033875,
+ "learning_rate": 0.00018704460129573391,
+ "loss": 1.3517,
+ "step": 491
+ },
+ {
+ "epoch": 0.4921384139289175,
+ "grad_norm": 0.5568780303001404,
+ "learning_rate": 0.00018699295212655596,
+ "loss": 1.4287,
+ "step": 492
+ },
+ {
+ "epoch": 0.4931386952580413,
+ "grad_norm": 0.6663610935211182,
+ "learning_rate": 0.00018694120736785632,
+ "loss": 1.5416,
+ "step": 493
+ },
+ {
+ "epoch": 0.49413897658716516,
+ "grad_norm": 0.5753045082092285,
+ "learning_rate": 0.00018688936707649304,
+ "loss": 1.5552,
+ "step": 494
+ },
+ {
+ "epoch": 0.49513925791628893,
+ "grad_norm": 0.5707410573959351,
+ "learning_rate": 0.00018683743130942928,
+ "loss": 1.5332,
+ "step": 495
+ },
+ {
+ "epoch": 0.49613953924541276,
+ "grad_norm": 0.5847951173782349,
+ "learning_rate": 0.00018678540012373302,
+ "loss": 1.3488,
+ "step": 496
+ },
+ {
+ "epoch": 0.4971398205745366,
+ "grad_norm": 0.60503751039505,
+ "learning_rate": 0.00018673327357657715,
+ "loss": 1.3924,
+ "step": 497
+ },
+ {
+ "epoch": 0.4981401019036604,
+ "grad_norm": 0.635142982006073,
+ "learning_rate": 0.0001866810517252393,
+ "loss": 1.4392,
+ "step": 498
+ },
+ {
+ "epoch": 0.4991403832327842,
+ "grad_norm": 0.5536782741546631,
+ "learning_rate": 0.00018662873462710184,
+ "loss": 1.286,
+ "step": 499
+ },
+ {
+ "epoch": 0.5001406645619081,
+ "grad_norm": 0.5676659345626831,
+ "learning_rate": 0.0001865763223396518,
+ "loss": 1.3006,
+ "step": 500
+ },
+ {
+ "epoch": 0.5011409458910319,
+ "grad_norm": 0.5546663403511047,
+ "learning_rate": 0.00018652381492048083,
+ "loss": 1.418,
+ "step": 501
+ },
+ {
+ "epoch": 0.5021412272201556,
+ "grad_norm": 0.5137162804603577,
+ "learning_rate": 0.00018647121242728506,
+ "loss": 1.3173,
+ "step": 502
+ },
+ {
+ "epoch": 0.5031415085492795,
+ "grad_norm": 0.5474348068237305,
+ "learning_rate": 0.00018641851491786512,
+ "loss": 1.6652,
+ "step": 503
+ },
+ {
+ "epoch": 0.5041417898784033,
+ "grad_norm": 0.5563383102416992,
+ "learning_rate": 0.00018636572245012606,
+ "loss": 1.4519,
+ "step": 504
+ },
+ {
+ "epoch": 0.5051420712075271,
+ "grad_norm": 0.5621083974838257,
+ "learning_rate": 0.00018631283508207725,
+ "loss": 1.5418,
+ "step": 505
+ },
+ {
+ "epoch": 0.506142352536651,
+ "grad_norm": 0.49915972352027893,
+ "learning_rate": 0.00018625985287183233,
+ "loss": 1.2969,
+ "step": 506
+ },
+ {
+ "epoch": 0.5071426338657747,
+ "grad_norm": 0.601996660232544,
+ "learning_rate": 0.00018620677587760916,
+ "loss": 1.4483,
+ "step": 507
+ },
+ {
+ "epoch": 0.5081429151948985,
+ "grad_norm": 0.5594652891159058,
+ "learning_rate": 0.00018615360415772978,
+ "loss": 1.4094,
+ "step": 508
+ },
+ {
+ "epoch": 0.5091431965240224,
+ "grad_norm": 0.557381808757782,
+ "learning_rate": 0.00018610033777062025,
+ "loss": 1.216,
+ "step": 509
+ },
+ {
+ "epoch": 0.5101434778531462,
+ "grad_norm": 0.5841740369796753,
+ "learning_rate": 0.0001860469767748108,
+ "loss": 1.4924,
+ "step": 510
+ },
+ {
+ "epoch": 0.5111437591822701,
+ "grad_norm": 0.4968324899673462,
+ "learning_rate": 0.00018599352122893539,
+ "loss": 1.2474,
+ "step": 511
+ },
+ {
+ "epoch": 0.5121440405113938,
+ "grad_norm": 0.5390318632125854,
+ "learning_rate": 0.00018593997119173205,
+ "loss": 1.4484,
+ "step": 512
+ },
+ {
+ "epoch": 0.5131443218405176,
+ "grad_norm": 0.6626128554344177,
+ "learning_rate": 0.00018588632672204264,
+ "loss": 1.5664,
+ "step": 513
+ },
+ {
+ "epoch": 0.5141446031696415,
+ "grad_norm": 0.6183133721351624,
+ "learning_rate": 0.0001858325878788126,
+ "loss": 1.5603,
+ "step": 514
+ },
+ {
+ "epoch": 0.5151448844987653,
+ "grad_norm": 0.5574773550033569,
+ "learning_rate": 0.00018577875472109134,
+ "loss": 1.3668,
+ "step": 515
+ },
+ {
+ "epoch": 0.516145165827889,
+ "grad_norm": 0.5127518773078918,
+ "learning_rate": 0.0001857248273080317,
+ "loss": 1.264,
+ "step": 516
+ },
+ {
+ "epoch": 0.5171454471570129,
+ "grad_norm": 0.6540619134902954,
+ "learning_rate": 0.00018567080569889015,
+ "loss": 1.3091,
+ "step": 517
+ },
+ {
+ "epoch": 0.5181457284861367,
+ "grad_norm": 0.5286336541175842,
+ "learning_rate": 0.00018561668995302667,
+ "loss": 1.3581,
+ "step": 518
+ },
+ {
+ "epoch": 0.5191460098152605,
+ "grad_norm": 0.6609972715377808,
+ "learning_rate": 0.00018556248012990468,
+ "loss": 1.3123,
+ "step": 519
+ },
+ {
+ "epoch": 0.5201462911443844,
+ "grad_norm": 0.48230236768722534,
+ "learning_rate": 0.000185508176289091,
+ "loss": 1.2372,
+ "step": 520
+ },
+ {
+ "epoch": 0.5211465724735082,
+ "grad_norm": 0.5173765420913696,
+ "learning_rate": 0.00018545377849025566,
+ "loss": 1.327,
+ "step": 521
+ },
+ {
+ "epoch": 0.522146853802632,
+ "grad_norm": 0.5822583436965942,
+ "learning_rate": 0.0001853992867931721,
+ "loss": 1.3851,
+ "step": 522
+ },
+ {
+ "epoch": 0.5231471351317558,
+ "grad_norm": 0.6025621891021729,
+ "learning_rate": 0.00018534470125771674,
+ "loss": 1.5627,
+ "step": 523
+ },
+ {
+ "epoch": 0.5241474164608796,
+ "grad_norm": 0.5516778230667114,
+ "learning_rate": 0.0001852900219438693,
+ "loss": 1.4036,
+ "step": 524
+ },
+ {
+ "epoch": 0.5251476977900035,
+ "grad_norm": 0.5738380551338196,
+ "learning_rate": 0.0001852352489117124,
+ "loss": 1.5042,
+ "step": 525
+ },
+ {
+ "epoch": 0.5261479791191273,
+ "grad_norm": 0.6360776424407959,
+ "learning_rate": 0.00018518038222143174,
+ "loss": 1.4101,
+ "step": 526
+ },
+ {
+ "epoch": 0.527148260448251,
+ "grad_norm": 0.5776675939559937,
+ "learning_rate": 0.00018512542193331583,
+ "loss": 1.6015,
+ "step": 527
+ },
+ {
+ "epoch": 0.5281485417773749,
+ "grad_norm": 0.5662726759910583,
+ "learning_rate": 0.00018507036810775615,
+ "loss": 1.3186,
+ "step": 528
+ },
+ {
+ "epoch": 0.5291488231064987,
+ "grad_norm": 0.6518335938453674,
+ "learning_rate": 0.00018501522080524688,
+ "loss": 1.4882,
+ "step": 529
+ },
+ {
+ "epoch": 0.5301491044356225,
+ "grad_norm": 0.5475590825080872,
+ "learning_rate": 0.0001849599800863849,
+ "loss": 1.487,
+ "step": 530
+ },
+ {
+ "epoch": 0.5311493857647464,
+ "grad_norm": 0.6275209188461304,
+ "learning_rate": 0.0001849046460118698,
+ "loss": 1.3563,
+ "step": 531
+ },
+ {
+ "epoch": 0.5321496670938701,
+ "grad_norm": 0.5629132390022278,
+ "learning_rate": 0.0001848492186425037,
+ "loss": 1.516,
+ "step": 532
+ },
+ {
+ "epoch": 0.533149948422994,
+ "grad_norm": 0.5251057744026184,
+ "learning_rate": 0.0001847936980391913,
+ "loss": 1.5254,
+ "step": 533
+ },
+ {
+ "epoch": 0.5341502297521178,
+ "grad_norm": 0.5635396838188171,
+ "learning_rate": 0.00018473808426293964,
+ "loss": 1.3408,
+ "step": 534
+ },
+ {
+ "epoch": 0.5351505110812416,
+ "grad_norm": 0.527082622051239,
+ "learning_rate": 0.00018468237737485823,
+ "loss": 1.2664,
+ "step": 535
+ },
+ {
+ "epoch": 0.5361507924103655,
+ "grad_norm": 0.6555044054985046,
+ "learning_rate": 0.00018462657743615888,
+ "loss": 1.464,
+ "step": 536
+ },
+ {
+ "epoch": 0.5371510737394892,
+ "grad_norm": 0.5468676686286926,
+ "learning_rate": 0.00018457068450815562,
+ "loss": 1.3733,
+ "step": 537
+ },
+ {
+ "epoch": 0.538151355068613,
+ "grad_norm": 0.5662835836410522,
+ "learning_rate": 0.00018451469865226464,
+ "loss": 1.509,
+ "step": 538
+ },
+ {
+ "epoch": 0.5391516363977369,
+ "grad_norm": 0.5553548336029053,
+ "learning_rate": 0.00018445861993000436,
+ "loss": 1.2476,
+ "step": 539
+ },
+ {
+ "epoch": 0.5401519177268607,
+ "grad_norm": 0.6240925192832947,
+ "learning_rate": 0.00018440244840299506,
+ "loss": 1.5835,
+ "step": 540
+ },
+ {
+ "epoch": 0.5411521990559846,
+ "grad_norm": 0.6107541918754578,
+ "learning_rate": 0.0001843461841329591,
+ "loss": 1.7176,
+ "step": 541
+ },
+ {
+ "epoch": 0.5421524803851083,
+ "grad_norm": 0.6990326642990112,
+ "learning_rate": 0.0001842898271817208,
+ "loss": 1.4235,
+ "step": 542
+ },
+ {
+ "epoch": 0.5431527617142321,
+ "grad_norm": 0.583871603012085,
+ "learning_rate": 0.00018423337761120618,
+ "loss": 1.5283,
+ "step": 543
+ },
+ {
+ "epoch": 0.544153043043356,
+ "grad_norm": 0.5585455894470215,
+ "learning_rate": 0.00018417683548344318,
+ "loss": 1.4875,
+ "step": 544
+ },
+ {
+ "epoch": 0.5451533243724798,
+ "grad_norm": 0.5199955701828003,
+ "learning_rate": 0.00018412020086056133,
+ "loss": 1.3989,
+ "step": 545
+ },
+ {
+ "epoch": 0.5461536057016035,
+ "grad_norm": 0.5517343878746033,
+ "learning_rate": 0.0001840634738047918,
+ "loss": 1.4073,
+ "step": 546
+ },
+ {
+ "epoch": 0.5471538870307274,
+ "grad_norm": 0.7140716314315796,
+ "learning_rate": 0.0001840066543784675,
+ "loss": 1.4477,
+ "step": 547
+ },
+ {
+ "epoch": 0.5481541683598512,
+ "grad_norm": 0.548422634601593,
+ "learning_rate": 0.00018394974264402257,
+ "loss": 1.4198,
+ "step": 548
+ },
+ {
+ "epoch": 0.549154449688975,
+ "grad_norm": 0.5907624363899231,
+ "learning_rate": 0.00018389273866399275,
+ "loss": 1.4033,
+ "step": 549
+ },
+ {
+ "epoch": 0.5501547310180989,
+ "grad_norm": 0.5327603220939636,
+ "learning_rate": 0.00018383564250101512,
+ "loss": 1.2674,
+ "step": 550
+ },
+ {
+ "epoch": 0.5511550123472226,
+ "grad_norm": 0.4678132236003876,
+ "learning_rate": 0.000183778454217828,
+ "loss": 1.3644,
+ "step": 551
+ },
+ {
+ "epoch": 0.5521552936763465,
+ "grad_norm": 0.674040675163269,
+ "learning_rate": 0.0001837211738772711,
+ "loss": 1.6942,
+ "step": 552
+ },
+ {
+ "epoch": 0.5531555750054703,
+ "grad_norm": 0.5374539494514465,
+ "learning_rate": 0.000183663801542285,
+ "loss": 1.1887,
+ "step": 553
+ },
+ {
+ "epoch": 0.5541558563345941,
+ "grad_norm": 0.5528072118759155,
+ "learning_rate": 0.00018360633727591155,
+ "loss": 1.2,
+ "step": 554
+ },
+ {
+ "epoch": 0.555156137663718,
+ "grad_norm": 0.6597411632537842,
+ "learning_rate": 0.00018354878114129367,
+ "loss": 1.402,
+ "step": 555
+ },
+ {
+ "epoch": 0.5561564189928417,
+ "grad_norm": 0.5931501388549805,
+ "learning_rate": 0.00018349113320167504,
+ "loss": 1.5583,
+ "step": 556
+ },
+ {
+ "epoch": 0.5571567003219655,
+ "grad_norm": 0.6331121921539307,
+ "learning_rate": 0.00018343339352040042,
+ "loss": 1.7882,
+ "step": 557
+ },
+ {
+ "epoch": 0.5581569816510894,
+ "grad_norm": 0.5221824645996094,
+ "learning_rate": 0.00018337556216091517,
+ "loss": 1.2457,
+ "step": 558
+ },
+ {
+ "epoch": 0.5591572629802132,
+ "grad_norm": 0.6008853912353516,
+ "learning_rate": 0.00018331763918676556,
+ "loss": 1.5916,
+ "step": 559
+ },
+ {
+ "epoch": 0.560157544309337,
+ "grad_norm": 0.5409006476402283,
+ "learning_rate": 0.00018325962466159848,
+ "loss": 1.3457,
+ "step": 560
+ },
+ {
+ "epoch": 0.5611578256384608,
+ "grad_norm": 0.5095859169960022,
+ "learning_rate": 0.00018320151864916135,
+ "loss": 1.3622,
+ "step": 561
+ },
+ {
+ "epoch": 0.5621581069675846,
+ "grad_norm": 0.5716331005096436,
+ "learning_rate": 0.00018314332121330225,
+ "loss": 1.6168,
+ "step": 562
+ },
+ {
+ "epoch": 0.5631583882967085,
+ "grad_norm": 0.600307047367096,
+ "learning_rate": 0.0001830850324179695,
+ "loss": 1.4117,
+ "step": 563
+ },
+ {
+ "epoch": 0.5641586696258323,
+ "grad_norm": 0.7528484463691711,
+ "learning_rate": 0.00018302665232721208,
+ "loss": 1.3418,
+ "step": 564
+ },
+ {
+ "epoch": 0.565158950954956,
+ "grad_norm": 0.6119087338447571,
+ "learning_rate": 0.0001829681810051791,
+ "loss": 1.4908,
+ "step": 565
+ },
+ {
+ "epoch": 0.5661592322840799,
+ "grad_norm": 0.6440190672874451,
+ "learning_rate": 0.00018290961851611995,
+ "loss": 1.3511,
+ "step": 566
+ },
+ {
+ "epoch": 0.5671595136132037,
+ "grad_norm": 0.647294282913208,
+ "learning_rate": 0.00018285096492438424,
+ "loss": 1.5165,
+ "step": 567
+ },
+ {
+ "epoch": 0.5681597949423275,
+ "grad_norm": 0.5499668717384338,
+ "learning_rate": 0.00018279222029442163,
+ "loss": 1.2876,
+ "step": 568
+ },
+ {
+ "epoch": 0.5691600762714514,
+ "grad_norm": 0.5629482865333557,
+ "learning_rate": 0.00018273338469078186,
+ "loss": 1.2256,
+ "step": 569
+ },
+ {
+ "epoch": 0.5701603576005752,
+ "grad_norm": 0.48661288619041443,
+ "learning_rate": 0.00018267445817811466,
+ "loss": 1.44,
+ "step": 570
+ },
+ {
+ "epoch": 0.5711606389296989,
+ "grad_norm": 0.5713567733764648,
+ "learning_rate": 0.00018261544082116954,
+ "loss": 1.741,
+ "step": 571
+ },
+ {
+ "epoch": 0.5721609202588228,
+ "grad_norm": 0.6130850315093994,
+ "learning_rate": 0.00018255633268479595,
+ "loss": 1.526,
+ "step": 572
+ },
+ {
+ "epoch": 0.5731612015879466,
+ "grad_norm": 0.5415536761283875,
+ "learning_rate": 0.00018249713383394303,
+ "loss": 1.2405,
+ "step": 573
+ },
+ {
+ "epoch": 0.5741614829170705,
+ "grad_norm": 0.600574791431427,
+ "learning_rate": 0.0001824378443336596,
+ "loss": 1.4534,
+ "step": 574
+ },
+ {
+ "epoch": 0.5751617642461943,
+ "grad_norm": 0.5479387044906616,
+ "learning_rate": 0.00018237846424909413,
+ "loss": 1.4277,
+ "step": 575
+ },
+ {
+ "epoch": 0.576162045575318,
+ "grad_norm": 0.5536132454872131,
+ "learning_rate": 0.00018231899364549455,
+ "loss": 1.3918,
+ "step": 576
+ },
+ {
+ "epoch": 0.5771623269044419,
+ "grad_norm": 0.6228598356246948,
+ "learning_rate": 0.00018225943258820833,
+ "loss": 1.413,
+ "step": 577
+ },
+ {
+ "epoch": 0.5781626082335657,
+ "grad_norm": 0.5498123168945312,
+ "learning_rate": 0.00018219978114268227,
+ "loss": 1.3558,
+ "step": 578
+ },
+ {
+ "epoch": 0.5791628895626895,
+ "grad_norm": 0.5427498817443848,
+ "learning_rate": 0.00018214003937446253,
+ "loss": 1.509,
+ "step": 579
+ },
+ {
+ "epoch": 0.5801631708918134,
+ "grad_norm": 0.522285521030426,
+ "learning_rate": 0.00018208020734919455,
+ "loss": 1.3847,
+ "step": 580
+ },
+ {
+ "epoch": 0.5811634522209371,
+ "grad_norm": 0.5963860750198364,
+ "learning_rate": 0.00018202028513262288,
+ "loss": 1.4605,
+ "step": 581
+ },
+ {
+ "epoch": 0.5821637335500609,
+ "grad_norm": 0.4854499101638794,
+ "learning_rate": 0.00018196027279059117,
+ "loss": 1.4968,
+ "step": 582
+ },
+ {
+ "epoch": 0.5831640148791848,
+ "grad_norm": 0.503466010093689,
+ "learning_rate": 0.00018190017038904215,
+ "loss": 1.2568,
+ "step": 583
+ },
+ {
+ "epoch": 0.5841642962083086,
+ "grad_norm": 0.6027483940124512,
+ "learning_rate": 0.0001818399779940175,
+ "loss": 1.5744,
+ "step": 584
+ },
+ {
+ "epoch": 0.5851645775374325,
+ "grad_norm": 0.5450258851051331,
+ "learning_rate": 0.0001817796956716578,
+ "loss": 1.2672,
+ "step": 585
+ },
+ {
+ "epoch": 0.5861648588665562,
+ "grad_norm": 0.5376724600791931,
+ "learning_rate": 0.00018171932348820234,
+ "loss": 1.5099,
+ "step": 586
+ },
+ {
+ "epoch": 0.58716514019568,
+ "grad_norm": 0.513921856880188,
+ "learning_rate": 0.0001816588615099893,
+ "loss": 1.3213,
+ "step": 587
+ },
+ {
+ "epoch": 0.5881654215248039,
+ "grad_norm": 0.7540159225463867,
+ "learning_rate": 0.00018159830980345548,
+ "loss": 1.2231,
+ "step": 588
+ },
+ {
+ "epoch": 0.5891657028539277,
+ "grad_norm": 0.5917702317237854,
+ "learning_rate": 0.0001815376684351362,
+ "loss": 1.6094,
+ "step": 589
+ },
+ {
+ "epoch": 0.5901659841830514,
+ "grad_norm": 0.5507463216781616,
+ "learning_rate": 0.00018147693747166534,
+ "loss": 1.3904,
+ "step": 590
+ },
+ {
+ "epoch": 0.5911662655121753,
+ "grad_norm": 0.545695960521698,
+ "learning_rate": 0.00018141611697977529,
+ "loss": 1.5172,
+ "step": 591
+ },
+ {
+ "epoch": 0.5921665468412991,
+ "grad_norm": 0.5876530408859253,
+ "learning_rate": 0.00018135520702629675,
+ "loss": 1.3676,
+ "step": 592
+ },
+ {
+ "epoch": 0.5931668281704229,
+ "grad_norm": 0.5510894060134888,
+ "learning_rate": 0.0001812942076781588,
+ "loss": 1.4379,
+ "step": 593
+ },
+ {
+ "epoch": 0.5941671094995468,
+ "grad_norm": 0.5105913877487183,
+ "learning_rate": 0.0001812331190023886,
+ "loss": 1.3687,
+ "step": 594
+ },
+ {
+ "epoch": 0.5951673908286705,
+ "grad_norm": 0.47876060009002686,
+ "learning_rate": 0.0001811719410661116,
+ "loss": 1.3178,
+ "step": 595
+ },
+ {
+ "epoch": 0.5961676721577944,
+ "grad_norm": 0.6079074144363403,
+ "learning_rate": 0.00018111067393655132,
+ "loss": 1.4713,
+ "step": 596
+ },
+ {
+ "epoch": 0.5971679534869182,
+ "grad_norm": 0.5363487601280212,
+ "learning_rate": 0.0001810493176810292,
+ "loss": 1.1868,
+ "step": 597
+ },
+ {
+ "epoch": 0.598168234816042,
+ "grad_norm": 0.5252292156219482,
+ "learning_rate": 0.00018098787236696474,
+ "loss": 1.303,
+ "step": 598
+ },
+ {
+ "epoch": 0.5991685161451659,
+ "grad_norm": 0.5377137064933777,
+ "learning_rate": 0.00018092633806187513,
+ "loss": 1.3653,
+ "step": 599
+ },
+ {
+ "epoch": 0.6001687974742896,
+ "grad_norm": 0.5274302363395691,
+ "learning_rate": 0.0001808647148333755,
+ "loss": 1.3693,
+ "step": 600
+ },
+ {
+ "epoch": 0.6011690788034134,
+ "grad_norm": 0.5664658546447754,
+ "learning_rate": 0.00018080300274917862,
+ "loss": 1.3807,
+ "step": 601
+ },
+ {
+ "epoch": 0.6021693601325373,
+ "grad_norm": 0.6609538197517395,
+ "learning_rate": 0.00018074120187709495,
+ "loss": 1.5015,
+ "step": 602
+ },
+ {
+ "epoch": 0.6031696414616611,
+ "grad_norm": 0.4943195879459381,
+ "learning_rate": 0.00018067931228503246,
+ "loss": 1.4436,
+ "step": 603
+ },
+ {
+ "epoch": 0.604169922790785,
+ "grad_norm": 0.549712598323822,
+ "learning_rate": 0.00018061733404099655,
+ "loss": 1.455,
+ "step": 604
+ },
+ {
+ "epoch": 0.6051702041199087,
+ "grad_norm": 0.5765941143035889,
+ "learning_rate": 0.00018055526721309016,
+ "loss": 1.3317,
+ "step": 605
+ },
+ {
+ "epoch": 0.6061704854490325,
+ "grad_norm": 0.5223068594932556,
+ "learning_rate": 0.0001804931118695135,
+ "loss": 1.3456,
+ "step": 606
+ },
+ {
+ "epoch": 0.6071707667781564,
+ "grad_norm": 0.5385129451751709,
+ "learning_rate": 0.00018043086807856403,
+ "loss": 1.3388,
+ "step": 607
+ },
+ {
+ "epoch": 0.6081710481072802,
+ "grad_norm": 0.5244528651237488,
+ "learning_rate": 0.00018036853590863648,
+ "loss": 1.398,
+ "step": 608
+ },
+ {
+ "epoch": 0.609171329436404,
+ "grad_norm": 0.5274112224578857,
+ "learning_rate": 0.00018030611542822257,
+ "loss": 1.3105,
+ "step": 609
+ },
+ {
+ "epoch": 0.6101716107655278,
+ "grad_norm": 0.5351893305778503,
+ "learning_rate": 0.00018024360670591114,
+ "loss": 1.3128,
+ "step": 610
+ },
+ {
+ "epoch": 0.6111718920946516,
+ "grad_norm": 0.5729460120201111,
+ "learning_rate": 0.00018018100981038798,
+ "loss": 1.3606,
+ "step": 611
+ },
+ {
+ "epoch": 0.6121721734237754,
+ "grad_norm": 0.5494408011436462,
+ "learning_rate": 0.00018011832481043576,
+ "loss": 1.4517,
+ "step": 612
+ },
+ {
+ "epoch": 0.6131724547528993,
+ "grad_norm": 0.5205882787704468,
+ "learning_rate": 0.00018005555177493394,
+ "loss": 1.4943,
+ "step": 613
+ },
+ {
+ "epoch": 0.614172736082023,
+ "grad_norm": 0.5488479137420654,
+ "learning_rate": 0.00017999269077285875,
+ "loss": 1.3939,
+ "step": 614
+ },
+ {
+ "epoch": 0.6151730174111469,
+ "grad_norm": 0.5779786109924316,
+ "learning_rate": 0.00017992974187328305,
+ "loss": 1.5744,
+ "step": 615
+ },
+ {
+ "epoch": 0.6161732987402707,
+ "grad_norm": 0.5576769113540649,
+ "learning_rate": 0.00017986670514537627,
+ "loss": 1.2284,
+ "step": 616
+ },
+ {
+ "epoch": 0.6171735800693945,
+ "grad_norm": 0.4912784993648529,
+ "learning_rate": 0.00017980358065840444,
+ "loss": 1.292,
+ "step": 617
+ },
+ {
+ "epoch": 0.6181738613985184,
+ "grad_norm": 0.657666027545929,
+ "learning_rate": 0.0001797403684817299,
+ "loss": 1.4918,
+ "step": 618
+ },
+ {
+ "epoch": 0.6191741427276422,
+ "grad_norm": 0.5642833113670349,
+ "learning_rate": 0.00017967706868481144,
+ "loss": 1.4718,
+ "step": 619
+ },
+ {
+ "epoch": 0.6201744240567659,
+ "grad_norm": 0.7243106961250305,
+ "learning_rate": 0.00017961368133720407,
+ "loss": 1.4342,
+ "step": 620
+ },
+ {
+ "epoch": 0.6211747053858898,
+ "grad_norm": 0.4982456564903259,
+ "learning_rate": 0.000179550206508559,
+ "loss": 1.4478,
+ "step": 621
+ },
+ {
+ "epoch": 0.6221749867150136,
+ "grad_norm": 0.5249592065811157,
+ "learning_rate": 0.00017948664426862364,
+ "loss": 1.485,
+ "step": 622
+ },
+ {
+ "epoch": 0.6231752680441374,
+ "grad_norm": 0.6167681217193604,
+ "learning_rate": 0.00017942299468724134,
+ "loss": 1.4813,
+ "step": 623
+ },
+ {
+ "epoch": 0.6241755493732613,
+ "grad_norm": 0.5300460457801819,
+ "learning_rate": 0.0001793592578343515,
+ "loss": 1.1364,
+ "step": 624
+ },
+ {
+ "epoch": 0.625175830702385,
+ "grad_norm": 0.5908417105674744,
+ "learning_rate": 0.0001792954337799894,
+ "loss": 1.4402,
+ "step": 625
+ },
+ {
+ "epoch": 0.6261761120315089,
+ "grad_norm": 0.5684035420417786,
+ "learning_rate": 0.00017923152259428612,
+ "loss": 1.4847,
+ "step": 626
+ },
+ {
+ "epoch": 0.6271763933606327,
+ "grad_norm": 0.5421493053436279,
+ "learning_rate": 0.00017916752434746856,
+ "loss": 1.3348,
+ "step": 627
+ },
+ {
+ "epoch": 0.6281766746897565,
+ "grad_norm": 0.5295160412788391,
+ "learning_rate": 0.0001791034391098591,
+ "loss": 1.4703,
+ "step": 628
+ },
+ {
+ "epoch": 0.6291769560188804,
+ "grad_norm": 0.5196051001548767,
+ "learning_rate": 0.00017903926695187595,
+ "loss": 1.3478,
+ "step": 629
+ },
+ {
+ "epoch": 0.6301772373480041,
+ "grad_norm": 0.4994469881057739,
+ "learning_rate": 0.0001789750079440326,
+ "loss": 1.2368,
+ "step": 630
+ },
+ {
+ "epoch": 0.6311775186771279,
+ "grad_norm": 0.5117055177688599,
+ "learning_rate": 0.00017891066215693817,
+ "loss": 1.3429,
+ "step": 631
+ },
+ {
+ "epoch": 0.6321778000062518,
+ "grad_norm": 0.49438026547431946,
+ "learning_rate": 0.00017884622966129695,
+ "loss": 1.301,
+ "step": 632
+ },
+ {
+ "epoch": 0.6331780813353756,
+ "grad_norm": 0.6113334894180298,
+ "learning_rate": 0.00017878171052790868,
+ "loss": 1.4636,
+ "step": 633
+ },
+ {
+ "epoch": 0.6341783626644993,
+ "grad_norm": 0.6063141822814941,
+ "learning_rate": 0.00017871710482766817,
+ "loss": 1.2262,
+ "step": 634
+ },
+ {
+ "epoch": 0.6351786439936232,
+ "grad_norm": 0.5604403614997864,
+ "learning_rate": 0.00017865241263156546,
+ "loss": 1.4112,
+ "step": 635
+ },
+ {
+ "epoch": 0.636178925322747,
+ "grad_norm": 0.523415207862854,
+ "learning_rate": 0.0001785876340106855,
+ "loss": 1.3281,
+ "step": 636
+ },
+ {
+ "epoch": 0.6371792066518709,
+ "grad_norm": 0.5602991580963135,
+ "learning_rate": 0.0001785227690362083,
+ "loss": 1.44,
+ "step": 637
+ },
+ {
+ "epoch": 0.6381794879809947,
+ "grad_norm": 0.46946853399276733,
+ "learning_rate": 0.00017845781777940878,
+ "loss": 1.2956,
+ "step": 638
+ },
+ {
+ "epoch": 0.6391797693101184,
+ "grad_norm": 0.5586503744125366,
+ "learning_rate": 0.00017839278031165658,
+ "loss": 1.5419,
+ "step": 639
+ },
+ {
+ "epoch": 0.6401800506392423,
+ "grad_norm": 0.5270752310752869,
+ "learning_rate": 0.00017832765670441612,
+ "loss": 1.305,
+ "step": 640
+ },
+ {
+ "epoch": 0.6411803319683661,
+ "grad_norm": 0.57756108045578,
+ "learning_rate": 0.0001782624470292465,
+ "loss": 1.2145,
+ "step": 641
+ },
+ {
+ "epoch": 0.6421806132974899,
+ "grad_norm": 0.5709058046340942,
+ "learning_rate": 0.0001781971513578013,
+ "loss": 1.4804,
+ "step": 642
+ },
+ {
+ "epoch": 0.6431808946266138,
+ "grad_norm": 0.505849301815033,
+ "learning_rate": 0.00017813176976182873,
+ "loss": 1.3964,
+ "step": 643
+ },
+ {
+ "epoch": 0.6441811759557375,
+ "grad_norm": 0.5171617269515991,
+ "learning_rate": 0.00017806630231317127,
+ "loss": 1.3283,
+ "step": 644
+ },
+ {
+ "epoch": 0.6451814572848613,
+ "grad_norm": 0.5567512512207031,
+ "learning_rate": 0.00017800074908376584,
+ "loss": 1.481,
+ "step": 645
+ },
+ {
+ "epoch": 0.6461817386139852,
+ "grad_norm": 0.5000666379928589,
+ "learning_rate": 0.00017793511014564358,
+ "loss": 1.2856,
+ "step": 646
+ },
+ {
+ "epoch": 0.647182019943109,
+ "grad_norm": 0.49550777673721313,
+ "learning_rate": 0.00017786938557092983,
+ "loss": 1.3447,
+ "step": 647
+ },
+ {
+ "epoch": 0.6481823012722329,
+ "grad_norm": 0.5904624462127686,
+ "learning_rate": 0.00017780357543184397,
+ "loss": 1.241,
+ "step": 648
+ },
+ {
+ "epoch": 0.6491825826013566,
+ "grad_norm": 0.4615901708602905,
+ "learning_rate": 0.00017773767980069945,
+ "loss": 1.3436,
+ "step": 649
+ },
+ {
+ "epoch": 0.6501828639304804,
+ "grad_norm": 0.48083069920539856,
+ "learning_rate": 0.0001776716987499037,
+ "loss": 1.3906,
+ "step": 650
+ },
+ {
+ "epoch": 0.6511831452596043,
+ "grad_norm": 0.4525931775569916,
+ "learning_rate": 0.0001776056323519579,
+ "loss": 1.3417,
+ "step": 651
+ },
+ {
+ "epoch": 0.6521834265887281,
+ "grad_norm": 0.6179555058479309,
+ "learning_rate": 0.00017753948067945712,
+ "loss": 1.3438,
+ "step": 652
+ },
+ {
+ "epoch": 0.6531837079178519,
+ "grad_norm": 0.5525293946266174,
+ "learning_rate": 0.00017747324380509006,
+ "loss": 1.4551,
+ "step": 653
+ },
+ {
+ "epoch": 0.6541839892469757,
+ "grad_norm": 0.533028781414032,
+ "learning_rate": 0.00017740692180163908,
+ "loss": 1.4396,
+ "step": 654
+ },
+ {
+ "epoch": 0.6551842705760995,
+ "grad_norm": 0.5196881890296936,
+ "learning_rate": 0.00017734051474198003,
+ "loss": 1.3032,
+ "step": 655
+ },
+ {
+ "epoch": 0.6561845519052233,
+ "grad_norm": 0.5190469622612,
+ "learning_rate": 0.0001772740226990823,
+ "loss": 1.4049,
+ "step": 656
+ },
+ {
+ "epoch": 0.6571848332343472,
+ "grad_norm": 0.49517175555229187,
+ "learning_rate": 0.00017720744574600863,
+ "loss": 1.3696,
+ "step": 657
+ },
+ {
+ "epoch": 0.658185114563471,
+ "grad_norm": 0.5165138244628906,
+ "learning_rate": 0.00017714078395591502,
+ "loss": 1.3667,
+ "step": 658
+ },
+ {
+ "epoch": 0.6591853958925948,
+ "grad_norm": 0.5624507665634155,
+ "learning_rate": 0.00017707403740205071,
+ "loss": 1.2109,
+ "step": 659
+ },
+ {
+ "epoch": 0.6601856772217186,
+ "grad_norm": 0.45942649245262146,
+ "learning_rate": 0.00017700720615775812,
+ "loss": 1.259,
+ "step": 660
+ },
+ {
+ "epoch": 0.6611859585508424,
+ "grad_norm": 0.5019019842147827,
+ "learning_rate": 0.0001769402902964727,
+ "loss": 1.3739,
+ "step": 661
+ },
+ {
+ "epoch": 0.6621862398799663,
+ "grad_norm": 0.4661652743816376,
+ "learning_rate": 0.00017687328989172288,
+ "loss": 1.2606,
+ "step": 662
+ },
+ {
+ "epoch": 0.66318652120909,
+ "grad_norm": 0.5310545563697815,
+ "learning_rate": 0.00017680620501712996,
+ "loss": 1.3406,
+ "step": 663
+ },
+ {
+ "epoch": 0.6641868025382138,
+ "grad_norm": 0.5190532207489014,
+ "learning_rate": 0.00017673903574640814,
+ "loss": 1.3052,
+ "step": 664
+ },
+ {
+ "epoch": 0.6651870838673377,
+ "grad_norm": 0.5265533328056335,
+ "learning_rate": 0.00017667178215336423,
+ "loss": 1.2326,
+ "step": 665
+ },
+ {
+ "epoch": 0.6661873651964615,
+ "grad_norm": 0.5971291065216064,
+ "learning_rate": 0.0001766044443118978,
+ "loss": 1.4291,
+ "step": 666
+ },
+ {
+ "epoch": 0.6671876465255854,
+ "grad_norm": 0.5295760631561279,
+ "learning_rate": 0.000176537022296001,
+ "loss": 1.2781,
+ "step": 667
+ },
+ {
+ "epoch": 0.6681879278547092,
+ "grad_norm": 0.5124595761299133,
+ "learning_rate": 0.00017646951617975837,
+ "loss": 1.318,
+ "step": 668
+ },
+ {
+ "epoch": 0.6691882091838329,
+ "grad_norm": 0.5968078970909119,
+ "learning_rate": 0.00017640192603734692,
+ "loss": 1.1483,
+ "step": 669
+ },
+ {
+ "epoch": 0.6701884905129568,
+ "grad_norm": 0.6211404204368591,
+ "learning_rate": 0.00017633425194303606,
+ "loss": 1.1164,
+ "step": 670
+ },
+ {
+ "epoch": 0.6711887718420806,
+ "grad_norm": 0.5539883375167847,
+ "learning_rate": 0.00017626649397118734,
+ "loss": 1.453,
+ "step": 671
+ },
+ {
+ "epoch": 0.6721890531712044,
+ "grad_norm": 0.5188294649124146,
+ "learning_rate": 0.00017619865219625452,
+ "loss": 1.5201,
+ "step": 672
+ },
+ {
+ "epoch": 0.6731893345003283,
+ "grad_norm": 0.531973659992218,
+ "learning_rate": 0.00017613072669278343,
+ "loss": 1.3176,
+ "step": 673
+ },
+ {
+ "epoch": 0.674189615829452,
+ "grad_norm": 0.5878707766532898,
+ "learning_rate": 0.00017606271753541192,
+ "loss": 1.5326,
+ "step": 674
+ },
+ {
+ "epoch": 0.6751898971585758,
+ "grad_norm": 0.595443844795227,
+ "learning_rate": 0.00017599462479886974,
+ "loss": 1.4033,
+ "step": 675
+ },
+ {
+ "epoch": 0.6761901784876997,
+ "grad_norm": 0.5093846321105957,
+ "learning_rate": 0.00017592644855797854,
+ "loss": 1.2995,
+ "step": 676
+ },
+ {
+ "epoch": 0.6771904598168235,
+ "grad_norm": 0.5521978735923767,
+ "learning_rate": 0.00017585818888765168,
+ "loss": 1.2912,
+ "step": 677
+ },
+ {
+ "epoch": 0.6781907411459474,
+ "grad_norm": 0.4612530469894409,
+ "learning_rate": 0.0001757898458628941,
+ "loss": 1.1902,
+ "step": 678
+ },
+ {
+ "epoch": 0.6791910224750711,
+ "grad_norm": 0.4973600506782532,
+ "learning_rate": 0.00017572141955880252,
+ "loss": 1.3547,
+ "step": 679
+ },
+ {
+ "epoch": 0.6801913038041949,
+ "grad_norm": 0.606407105922699,
+ "learning_rate": 0.00017565291005056504,
+ "loss": 1.371,
+ "step": 680
+ },
+ {
+ "epoch": 0.6811915851333188,
+ "grad_norm": 0.5027814507484436,
+ "learning_rate": 0.00017558431741346122,
+ "loss": 1.4551,
+ "step": 681
+ },
+ {
+ "epoch": 0.6821918664624426,
+ "grad_norm": 0.5732039213180542,
+ "learning_rate": 0.00017551564172286197,
+ "loss": 1.4181,
+ "step": 682
+ },
+ {
+ "epoch": 0.6831921477915663,
+ "grad_norm": 0.6327995657920837,
+ "learning_rate": 0.00017544688305422943,
+ "loss": 1.237,
+ "step": 683
+ },
+ {
+ "epoch": 0.6841924291206902,
+ "grad_norm": 0.5779625177383423,
+ "learning_rate": 0.00017537804148311695,
+ "loss": 1.5356,
+ "step": 684
+ },
+ {
+ "epoch": 0.685192710449814,
+ "grad_norm": 0.6031951308250427,
+ "learning_rate": 0.00017530911708516902,
+ "loss": 1.3776,
+ "step": 685
+ },
+ {
+ "epoch": 0.6861929917789378,
+ "grad_norm": 0.4811258018016815,
+ "learning_rate": 0.00017524010993612098,
+ "loss": 1.185,
+ "step": 686
+ },
+ {
+ "epoch": 0.6871932731080617,
+ "grad_norm": 0.5048002600669861,
+ "learning_rate": 0.00017517102011179933,
+ "loss": 1.3335,
+ "step": 687
+ },
+ {
+ "epoch": 0.6881935544371854,
+ "grad_norm": 0.5963343977928162,
+ "learning_rate": 0.0001751018476881212,
+ "loss": 1.4326,
+ "step": 688
+ },
+ {
+ "epoch": 0.6891938357663093,
+ "grad_norm": 0.4770168960094452,
+ "learning_rate": 0.00017503259274109464,
+ "loss": 1.4664,
+ "step": 689
+ },
+ {
+ "epoch": 0.6901941170954331,
+ "grad_norm": 0.5020537376403809,
+ "learning_rate": 0.00017496325534681825,
+ "loss": 1.349,
+ "step": 690
+ },
+ {
+ "epoch": 0.6911943984245569,
+ "grad_norm": 0.5567785501480103,
+ "learning_rate": 0.00017489383558148136,
+ "loss": 1.452,
+ "step": 691
+ },
+ {
+ "epoch": 0.6921946797536808,
+ "grad_norm": 0.5167350769042969,
+ "learning_rate": 0.00017482433352136365,
+ "loss": 1.1148,
+ "step": 692
+ },
+ {
+ "epoch": 0.6931949610828045,
+ "grad_norm": 0.6030716300010681,
+ "learning_rate": 0.00017475474924283536,
+ "loss": 1.3473,
+ "step": 693
+ },
+ {
+ "epoch": 0.6941952424119283,
+ "grad_norm": 0.5643062591552734,
+ "learning_rate": 0.00017468508282235704,
+ "loss": 1.3476,
+ "step": 694
+ },
+ {
+ "epoch": 0.6951955237410522,
+ "grad_norm": 0.5124102234840393,
+ "learning_rate": 0.00017461533433647946,
+ "loss": 1.339,
+ "step": 695
+ },
+ {
+ "epoch": 0.696195805070176,
+ "grad_norm": 0.5690215229988098,
+ "learning_rate": 0.00017454550386184362,
+ "loss": 1.3816,
+ "step": 696
+ },
+ {
+ "epoch": 0.6971960863992998,
+ "grad_norm": 0.5938367247581482,
+ "learning_rate": 0.00017447559147518055,
+ "loss": 1.4554,
+ "step": 697
+ },
+ {
+ "epoch": 0.6981963677284236,
+ "grad_norm": 0.5288996696472168,
+ "learning_rate": 0.00017440559725331135,
+ "loss": 1.2904,
+ "step": 698
+ },
+ {
+ "epoch": 0.6991966490575474,
+ "grad_norm": 0.5047140121459961,
+ "learning_rate": 0.000174335521273147,
+ "loss": 1.2362,
+ "step": 699
+ },
+ {
+ "epoch": 0.7001969303866713,
+ "grad_norm": 0.5563321709632874,
+ "learning_rate": 0.00017426536361168834,
+ "loss": 1.2863,
+ "step": 700
+ },
+ {
+ "epoch": 0.7011972117157951,
+ "grad_norm": 0.48857688903808594,
+ "learning_rate": 0.00017419512434602594,
+ "loss": 1.3387,
+ "step": 701
+ },
+ {
+ "epoch": 0.7021974930449189,
+ "grad_norm": 0.5205016732215881,
+ "learning_rate": 0.00017412480355334005,
+ "loss": 1.3874,
+ "step": 702
+ },
+ {
+ "epoch": 0.7031977743740427,
+ "grad_norm": 0.5850381851196289,
+ "learning_rate": 0.00017405440131090048,
+ "loss": 1.5369,
+ "step": 703
+ },
+ {
+ "epoch": 0.7041980557031665,
+ "grad_norm": 0.5708681344985962,
+ "learning_rate": 0.00017398391769606658,
+ "loss": 1.3622,
+ "step": 704
+ },
+ {
+ "epoch": 0.7051983370322903,
+ "grad_norm": 0.5743641257286072,
+ "learning_rate": 0.00017391335278628712,
+ "loss": 1.2946,
+ "step": 705
+ },
+ {
+ "epoch": 0.7061986183614142,
+ "grad_norm": 0.5376024842262268,
+ "learning_rate": 0.00017384270665910014,
+ "loss": 1.2952,
+ "step": 706
+ },
+ {
+ "epoch": 0.707198899690538,
+ "grad_norm": 0.6123641133308411,
+ "learning_rate": 0.000173771979392133,
+ "loss": 1.4239,
+ "step": 707
+ },
+ {
+ "epoch": 0.7081991810196617,
+ "grad_norm": 0.5639240741729736,
+ "learning_rate": 0.00017370117106310214,
+ "loss": 1.3627,
+ "step": 708
+ },
+ {
+ "epoch": 0.7091994623487856,
+ "grad_norm": 0.5551653504371643,
+ "learning_rate": 0.0001736302817498131,
+ "loss": 1.3435,
+ "step": 709
+ },
+ {
+ "epoch": 0.7101997436779094,
+ "grad_norm": 0.4746958911418915,
+ "learning_rate": 0.00017355931153016044,
+ "loss": 1.2402,
+ "step": 710
+ },
+ {
+ "epoch": 0.7112000250070333,
+ "grad_norm": 0.4722553491592407,
+ "learning_rate": 0.0001734882604821276,
+ "loss": 1.3962,
+ "step": 711
+ },
+ {
+ "epoch": 0.712200306336157,
+ "grad_norm": 0.5038101077079773,
+ "learning_rate": 0.0001734171286837868,
+ "loss": 1.3261,
+ "step": 712
+ },
+ {
+ "epoch": 0.7132005876652808,
+ "grad_norm": 0.5004639625549316,
+ "learning_rate": 0.00017334591621329906,
+ "loss": 1.4943,
+ "step": 713
+ },
+ {
+ "epoch": 0.7142008689944047,
+ "grad_norm": 0.5141516327857971,
+ "learning_rate": 0.00017327462314891402,
+ "loss": 1.2754,
+ "step": 714
+ },
+ {
+ "epoch": 0.7152011503235285,
+ "grad_norm": 0.5491873025894165,
+ "learning_rate": 0.00017320324956896977,
+ "loss": 1.3052,
+ "step": 715
+ },
+ {
+ "epoch": 0.7162014316526523,
+ "grad_norm": 0.49937358498573303,
+ "learning_rate": 0.00017313179555189306,
+ "loss": 1.2277,
+ "step": 716
+ },
+ {
+ "epoch": 0.7172017129817762,
+ "grad_norm": 0.6419594287872314,
+ "learning_rate": 0.00017306026117619889,
+ "loss": 1.4844,
+ "step": 717
+ },
+ {
+ "epoch": 0.7182019943108999,
+ "grad_norm": 0.521108090877533,
+ "learning_rate": 0.0001729886465204906,
+ "loss": 1.2917,
+ "step": 718
+ },
+ {
+ "epoch": 0.7192022756400237,
+ "grad_norm": 0.532421886920929,
+ "learning_rate": 0.0001729169516634598,
+ "loss": 1.4555,
+ "step": 719
+ },
+ {
+ "epoch": 0.7202025569691476,
+ "grad_norm": 0.5168073177337646,
+ "learning_rate": 0.0001728451766838861,
+ "loss": 1.2116,
+ "step": 720
+ },
+ {
+ "epoch": 0.7212028382982714,
+ "grad_norm": 0.5593972206115723,
+ "learning_rate": 0.00017277332166063726,
+ "loss": 1.4345,
+ "step": 721
+ },
+ {
+ "epoch": 0.7222031196273953,
+ "grad_norm": 0.5317432284355164,
+ "learning_rate": 0.00017270138667266894,
+ "loss": 1.2987,
+ "step": 722
+ },
+ {
+ "epoch": 0.723203400956519,
+ "grad_norm": 0.6262248158454895,
+ "learning_rate": 0.00017262937179902472,
+ "loss": 1.2591,
+ "step": 723
+ },
+ {
+ "epoch": 0.7242036822856428,
+ "grad_norm": 0.5377100110054016,
+ "learning_rate": 0.00017255727711883588,
+ "loss": 1.366,
+ "step": 724
+ },
+ {
+ "epoch": 0.7252039636147667,
+ "grad_norm": 0.5637168288230896,
+ "learning_rate": 0.00017248510271132144,
+ "loss": 1.4593,
+ "step": 725
+ },
+ {
+ "epoch": 0.7262042449438905,
+ "grad_norm": 0.5360320210456848,
+ "learning_rate": 0.00017241284865578802,
+ "loss": 1.4797,
+ "step": 726
+ },
+ {
+ "epoch": 0.7272045262730142,
+ "grad_norm": 0.48500168323516846,
+ "learning_rate": 0.00017234051503162978,
+ "loss": 1.3875,
+ "step": 727
+ },
+ {
+ "epoch": 0.7282048076021381,
+ "grad_norm": 0.5666176080703735,
+ "learning_rate": 0.0001722681019183283,
+ "loss": 1.4683,
+ "step": 728
+ },
+ {
+ "epoch": 0.7292050889312619,
+ "grad_norm": 0.5710940361022949,
+ "learning_rate": 0.00017219560939545246,
+ "loss": 1.5538,
+ "step": 729
+ },
+ {
+ "epoch": 0.7302053702603858,
+ "grad_norm": 0.5658044219017029,
+ "learning_rate": 0.00017212303754265843,
+ "loss": 1.248,
+ "step": 730
+ },
+ {
+ "epoch": 0.7312056515895096,
+ "grad_norm": 0.5355331301689148,
+ "learning_rate": 0.0001720503864396896,
+ "loss": 1.259,
+ "step": 731
+ },
+ {
+ "epoch": 0.7322059329186333,
+ "grad_norm": 0.5683363676071167,
+ "learning_rate": 0.00017197765616637636,
+ "loss": 1.4242,
+ "step": 732
+ },
+ {
+ "epoch": 0.7332062142477572,
+ "grad_norm": 0.488972932100296,
+ "learning_rate": 0.0001719048468026361,
+ "loss": 1.3442,
+ "step": 733
+ },
+ {
+ "epoch": 0.734206495576881,
+ "grad_norm": 0.45563748478889465,
+ "learning_rate": 0.00017183195842847322,
+ "loss": 1.3236,
+ "step": 734
+ },
+ {
+ "epoch": 0.7352067769060048,
+ "grad_norm": 0.5114185214042664,
+ "learning_rate": 0.0001717589911239788,
+ "loss": 1.3071,
+ "step": 735
+ },
+ {
+ "epoch": 0.7362070582351287,
+ "grad_norm": 0.558686375617981,
+ "learning_rate": 0.00017168594496933074,
+ "loss": 1.2889,
+ "step": 736
+ },
+ {
+ "epoch": 0.7372073395642524,
+ "grad_norm": 0.49099281430244446,
+ "learning_rate": 0.00017161282004479351,
+ "loss": 1.1701,
+ "step": 737
+ },
+ {
+ "epoch": 0.7382076208933762,
+ "grad_norm": 0.549524188041687,
+ "learning_rate": 0.0001715396164307182,
+ "loss": 1.2853,
+ "step": 738
+ },
+ {
+ "epoch": 0.7392079022225001,
+ "grad_norm": 0.5683863162994385,
+ "learning_rate": 0.0001714663342075424,
+ "loss": 1.4201,
+ "step": 739
+ },
+ {
+ "epoch": 0.7402081835516239,
+ "grad_norm": 0.5957104563713074,
+ "learning_rate": 0.00017139297345578994,
+ "loss": 1.3406,
+ "step": 740
+ },
+ {
+ "epoch": 0.7412084648807478,
+ "grad_norm": 0.4645147919654846,
+ "learning_rate": 0.00017131953425607104,
+ "loss": 1.2344,
+ "step": 741
+ },
+ {
+ "epoch": 0.7422087462098715,
+ "grad_norm": 0.4981783330440521,
+ "learning_rate": 0.00017124601668908212,
+ "loss": 1.422,
+ "step": 742
+ },
+ {
+ "epoch": 0.7432090275389953,
+ "grad_norm": 0.5426530838012695,
+ "learning_rate": 0.00017117242083560568,
+ "loss": 1.4275,
+ "step": 743
+ },
+ {
+ "epoch": 0.7442093088681192,
+ "grad_norm": 0.5585354566574097,
+ "learning_rate": 0.00017109874677651024,
+ "loss": 1.5049,
+ "step": 744
+ },
+ {
+ "epoch": 0.745209590197243,
+ "grad_norm": 0.5639151930809021,
+ "learning_rate": 0.0001710249945927503,
+ "loss": 1.4019,
+ "step": 745
+ },
+ {
+ "epoch": 0.7462098715263668,
+ "grad_norm": 0.8334717750549316,
+ "learning_rate": 0.00017095116436536612,
+ "loss": 1.5607,
+ "step": 746
+ },
+ {
+ "epoch": 0.7472101528554906,
+ "grad_norm": 0.513970673084259,
+ "learning_rate": 0.00017087725617548385,
+ "loss": 1.1967,
+ "step": 747
+ },
+ {
+ "epoch": 0.7482104341846144,
+ "grad_norm": 0.6200702786445618,
+ "learning_rate": 0.00017080327010431513,
+ "loss": 1.2298,
+ "step": 748
+ },
+ {
+ "epoch": 0.7492107155137382,
+ "grad_norm": 0.54522305727005,
+ "learning_rate": 0.00017072920623315734,
+ "loss": 1.3214,
+ "step": 749
+ },
+ {
+ "epoch": 0.7502109968428621,
+ "grad_norm": 0.6682360172271729,
+ "learning_rate": 0.00017065506464339326,
+ "loss": 1.4631,
+ "step": 750
+ },
+ {
+ "epoch": 0.7512112781719859,
+ "grad_norm": 0.5061535239219666,
+ "learning_rate": 0.00017058084541649106,
+ "loss": 1.5062,
+ "step": 751
+ },
+ {
+ "epoch": 0.7522115595011097,
+ "grad_norm": 0.5790627598762512,
+ "learning_rate": 0.00017050654863400429,
+ "loss": 1.1371,
+ "step": 752
+ },
+ {
+ "epoch": 0.7532118408302335,
+ "grad_norm": 0.6058077216148376,
+ "learning_rate": 0.00017043217437757164,
+ "loss": 1.2185,
+ "step": 753
+ },
+ {
+ "epoch": 0.7542121221593573,
+ "grad_norm": 0.5494515895843506,
+ "learning_rate": 0.00017035772272891702,
+ "loss": 1.2468,
+ "step": 754
+ },
+ {
+ "epoch": 0.7552124034884812,
+ "grad_norm": 0.5687912106513977,
+ "learning_rate": 0.00017028319376984928,
+ "loss": 1.5621,
+ "step": 755
+ },
+ {
+ "epoch": 0.756212684817605,
+ "grad_norm": 0.5341185927391052,
+ "learning_rate": 0.00017020858758226229,
+ "loss": 1.3598,
+ "step": 756
+ },
+ {
+ "epoch": 0.7572129661467287,
+ "grad_norm": 0.5373026132583618,
+ "learning_rate": 0.0001701339042481347,
+ "loss": 1.4185,
+ "step": 757
+ },
+ {
+ "epoch": 0.7582132474758526,
+ "grad_norm": 0.46508973836898804,
+ "learning_rate": 0.00017005914384953007,
+ "loss": 1.2962,
+ "step": 758
+ },
+ {
+ "epoch": 0.7592135288049764,
+ "grad_norm": 0.4580937325954437,
+ "learning_rate": 0.00016998430646859654,
+ "loss": 1.0707,
+ "step": 759
+ },
+ {
+ "epoch": 0.7602138101341002,
+ "grad_norm": 0.5277093052864075,
+ "learning_rate": 0.00016990939218756683,
+ "loss": 1.2529,
+ "step": 760
+ },
+ {
+ "epoch": 0.761214091463224,
+ "grad_norm": 0.5356671214103699,
+ "learning_rate": 0.0001698344010887582,
+ "loss": 1.4032,
+ "step": 761
+ },
+ {
+ "epoch": 0.7622143727923478,
+ "grad_norm": 0.6881769299507141,
+ "learning_rate": 0.0001697593332545723,
+ "loss": 1.4885,
+ "step": 762
+ },
+ {
+ "epoch": 0.7632146541214717,
+ "grad_norm": 0.5370383262634277,
+ "learning_rate": 0.0001696841887674951,
+ "loss": 1.3271,
+ "step": 763
+ },
+ {
+ "epoch": 0.7642149354505955,
+ "grad_norm": 0.4792316257953644,
+ "learning_rate": 0.00016960896771009684,
+ "loss": 1.2274,
+ "step": 764
+ },
+ {
+ "epoch": 0.7652152167797193,
+ "grad_norm": 0.5276592373847961,
+ "learning_rate": 0.00016953367016503182,
+ "loss": 1.2399,
+ "step": 765
+ },
+ {
+ "epoch": 0.7662154981088432,
+ "grad_norm": 0.4789050221443176,
+ "learning_rate": 0.00016945829621503838,
+ "loss": 1.4002,
+ "step": 766
+ },
+ {
+ "epoch": 0.7672157794379669,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 0.00016938284594293897,
+ "loss": 1.3897,
+ "step": 767
+ },
+ {
+ "epoch": 0.7682160607670907,
+ "grad_norm": 0.5009675621986389,
+ "learning_rate": 0.00016930731943163972,
+ "loss": 1.3797,
+ "step": 768
+ },
+ {
+ "epoch": 0.7692163420962146,
+ "grad_norm": 0.4863432049751282,
+ "learning_rate": 0.00016923171676413063,
+ "loss": 1.4251,
+ "step": 769
+ },
+ {
+ "epoch": 0.7702166234253384,
+ "grad_norm": 0.5190616846084595,
+ "learning_rate": 0.00016915603802348535,
+ "loss": 1.4265,
+ "step": 770
+ },
+ {
+ "epoch": 0.7712169047544621,
+ "grad_norm": 0.5603469610214233,
+ "learning_rate": 0.00016908028329286112,
+ "loss": 1.2852,
+ "step": 771
+ },
+ {
+ "epoch": 0.772217186083586,
+ "grad_norm": 0.5128753185272217,
+ "learning_rate": 0.0001690044526554987,
+ "loss": 1.3324,
+ "step": 772
+ },
+ {
+ "epoch": 0.7732174674127098,
+ "grad_norm": 0.4992072284221649,
+ "learning_rate": 0.00016892854619472223,
+ "loss": 1.2498,
+ "step": 773
+ },
+ {
+ "epoch": 0.7742177487418337,
+ "grad_norm": 0.6128407716751099,
+ "learning_rate": 0.00016885256399393924,
+ "loss": 1.2967,
+ "step": 774
+ },
+ {
+ "epoch": 0.7752180300709575,
+ "grad_norm": 0.5186858177185059,
+ "learning_rate": 0.00016877650613664034,
+ "loss": 1.2654,
+ "step": 775
+ },
+ {
+ "epoch": 0.7762183114000812,
+ "grad_norm": 0.5207421183586121,
+ "learning_rate": 0.00016870037270639942,
+ "loss": 1.2994,
+ "step": 776
+ },
+ {
+ "epoch": 0.7772185927292051,
+ "grad_norm": 0.509912371635437,
+ "learning_rate": 0.0001686241637868734,
+ "loss": 1.3971,
+ "step": 777
+ },
+ {
+ "epoch": 0.7782188740583289,
+ "grad_norm": 0.47703370451927185,
+ "learning_rate": 0.00016854787946180198,
+ "loss": 1.282,
+ "step": 778
+ },
+ {
+ "epoch": 0.7792191553874527,
+ "grad_norm": 0.5404442548751831,
+ "learning_rate": 0.00016847151981500789,
+ "loss": 1.1986,
+ "step": 779
+ },
+ {
+ "epoch": 0.7802194367165766,
+ "grad_norm": 0.541050136089325,
+ "learning_rate": 0.00016839508493039657,
+ "loss": 1.4478,
+ "step": 780
+ },
+ {
+ "epoch": 0.7812197180457003,
+ "grad_norm": 0.46520569920539856,
+ "learning_rate": 0.00016831857489195618,
+ "loss": 1.2385,
+ "step": 781
+ },
+ {
+ "epoch": 0.7822199993748241,
+ "grad_norm": 0.5150445699691772,
+ "learning_rate": 0.00016824198978375736,
+ "loss": 1.3695,
+ "step": 782
+ },
+ {
+ "epoch": 0.783220280703948,
+ "grad_norm": 0.5754334926605225,
+ "learning_rate": 0.00016816532968995328,
+ "loss": 1.3026,
+ "step": 783
+ },
+ {
+ "epoch": 0.7842205620330718,
+ "grad_norm": 0.5335776209831238,
+ "learning_rate": 0.0001680885946947796,
+ "loss": 1.3391,
+ "step": 784
+ },
+ {
+ "epoch": 0.7852208433621957,
+ "grad_norm": 0.6596659421920776,
+ "learning_rate": 0.00016801178488255413,
+ "loss": 1.3224,
+ "step": 785
+ },
+ {
+ "epoch": 0.7862211246913194,
+ "grad_norm": 0.5251991748809814,
+ "learning_rate": 0.00016793490033767698,
+ "loss": 1.1744,
+ "step": 786
+ },
+ {
+ "epoch": 0.7872214060204432,
+ "grad_norm": 0.5112204551696777,
+ "learning_rate": 0.00016785794114463037,
+ "loss": 1.2455,
+ "step": 787
+ },
+ {
+ "epoch": 0.7882216873495671,
+ "grad_norm": 0.532893717288971,
+ "learning_rate": 0.00016778090738797853,
+ "loss": 1.2437,
+ "step": 788
+ },
+ {
+ "epoch": 0.7892219686786909,
+ "grad_norm": 0.5534240007400513,
+ "learning_rate": 0.00016770379915236766,
+ "loss": 1.396,
+ "step": 789
+ },
+ {
+ "epoch": 0.7902222500078147,
+ "grad_norm": 0.5164292454719543,
+ "learning_rate": 0.00016762661652252567,
+ "loss": 1.3138,
+ "step": 790
+ },
+ {
+ "epoch": 0.7912225313369385,
+ "grad_norm": 0.5660764575004578,
+ "learning_rate": 0.00016754935958326244,
+ "loss": 1.3014,
+ "step": 791
+ },
+ {
+ "epoch": 0.7922228126660623,
+ "grad_norm": 0.5137651562690735,
+ "learning_rate": 0.00016747202841946928,
+ "loss": 1.2834,
+ "step": 792
+ },
+ {
+ "epoch": 0.7932230939951862,
+ "grad_norm": 0.5546874403953552,
+ "learning_rate": 0.00016739462311611919,
+ "loss": 1.2841,
+ "step": 793
+ },
+ {
+ "epoch": 0.79422337532431,
+ "grad_norm": 0.5112007260322571,
+ "learning_rate": 0.00016731714375826657,
+ "loss": 1.1873,
+ "step": 794
+ },
+ {
+ "epoch": 0.7952236566534338,
+ "grad_norm": 0.5462679862976074,
+ "learning_rate": 0.00016723959043104728,
+ "loss": 1.2602,
+ "step": 795
+ },
+ {
+ "epoch": 0.7962239379825576,
+ "grad_norm": 0.5083702802658081,
+ "learning_rate": 0.00016716196321967832,
+ "loss": 1.334,
+ "step": 796
+ },
+ {
+ "epoch": 0.7972242193116814,
+ "grad_norm": 0.5491913557052612,
+ "learning_rate": 0.00016708426220945802,
+ "loss": 1.335,
+ "step": 797
+ },
+ {
+ "epoch": 0.7982245006408052,
+ "grad_norm": 0.5257419943809509,
+ "learning_rate": 0.00016700648748576574,
+ "loss": 1.374,
+ "step": 798
+ },
+ {
+ "epoch": 0.7992247819699291,
+ "grad_norm": 0.5252013206481934,
+ "learning_rate": 0.0001669286391340618,
+ "loss": 1.281,
+ "step": 799
+ },
+ {
+ "epoch": 0.8002250632990529,
+ "grad_norm": 0.5784058570861816,
+ "learning_rate": 0.00016685071723988748,
+ "loss": 1.385,
+ "step": 800
+ },
+ {
+ "epoch": 0.8012253446281766,
+ "grad_norm": 0.5508819818496704,
+ "learning_rate": 0.00016677272188886483,
+ "loss": 1.5138,
+ "step": 801
+ },
+ {
+ "epoch": 0.8022256259573005,
+ "grad_norm": 0.5943104028701782,
+ "learning_rate": 0.00016669465316669667,
+ "loss": 1.2341,
+ "step": 802
+ },
+ {
+ "epoch": 0.8032259072864243,
+ "grad_norm": 0.5109750032424927,
+ "learning_rate": 0.00016661651115916642,
+ "loss": 1.361,
+ "step": 803
+ },
+ {
+ "epoch": 0.8042261886155482,
+ "grad_norm": 0.5322972536087036,
+ "learning_rate": 0.00016653829595213794,
+ "loss": 1.3383,
+ "step": 804
+ },
+ {
+ "epoch": 0.805226469944672,
+ "grad_norm": 0.4870489537715912,
+ "learning_rate": 0.00016646000763155568,
+ "loss": 1.2932,
+ "step": 805
+ },
+ {
+ "epoch": 0.8062267512737957,
+ "grad_norm": 0.6070749163627625,
+ "learning_rate": 0.00016638164628344425,
+ "loss": 1.3517,
+ "step": 806
+ },
+ {
+ "epoch": 0.8072270326029196,
+ "grad_norm": 0.5695485472679138,
+ "learning_rate": 0.00016630321199390867,
+ "loss": 1.295,
+ "step": 807
+ },
+ {
+ "epoch": 0.8082273139320434,
+ "grad_norm": 0.49092933535575867,
+ "learning_rate": 0.00016622470484913406,
+ "loss": 1.1708,
+ "step": 808
+ },
+ {
+ "epoch": 0.8092275952611672,
+ "grad_norm": 0.5488709807395935,
+ "learning_rate": 0.00016614612493538551,
+ "loss": 1.3101,
+ "step": 809
+ },
+ {
+ "epoch": 0.810227876590291,
+ "grad_norm": 0.6875150799751282,
+ "learning_rate": 0.00016606747233900815,
+ "loss": 1.3,
+ "step": 810
+ },
+ {
+ "epoch": 0.8112281579194148,
+ "grad_norm": 0.5599775910377502,
+ "learning_rate": 0.00016598874714642697,
+ "loss": 1.5711,
+ "step": 811
+ },
+ {
+ "epoch": 0.8122284392485386,
+ "grad_norm": 0.7102994322776794,
+ "learning_rate": 0.00016590994944414678,
+ "loss": 1.4553,
+ "step": 812
+ },
+ {
+ "epoch": 0.8132287205776625,
+ "grad_norm": 0.5191233158111572,
+ "learning_rate": 0.00016583107931875192,
+ "loss": 1.4292,
+ "step": 813
+ },
+ {
+ "epoch": 0.8142290019067863,
+ "grad_norm": 0.4739600718021393,
+ "learning_rate": 0.0001657521368569064,
+ "loss": 1.3776,
+ "step": 814
+ },
+ {
+ "epoch": 0.8152292832359102,
+ "grad_norm": 0.5282078981399536,
+ "learning_rate": 0.0001656731221453537,
+ "loss": 1.4359,
+ "step": 815
+ },
+ {
+ "epoch": 0.8162295645650339,
+ "grad_norm": 0.690367579460144,
+ "learning_rate": 0.00016559403527091675,
+ "loss": 1.1747,
+ "step": 816
+ },
+ {
+ "epoch": 0.8172298458941577,
+ "grad_norm": 0.5715120434761047,
+ "learning_rate": 0.0001655148763204977,
+ "loss": 1.3289,
+ "step": 817
+ },
+ {
+ "epoch": 0.8182301272232816,
+ "grad_norm": 0.7024423480033875,
+ "learning_rate": 0.00016543564538107797,
+ "loss": 1.4758,
+ "step": 818
+ },
+ {
+ "epoch": 0.8192304085524054,
+ "grad_norm": 0.5568886399269104,
+ "learning_rate": 0.00016535634253971794,
+ "loss": 1.5172,
+ "step": 819
+ },
+ {
+ "epoch": 0.8202306898815291,
+ "grad_norm": 0.5847441554069519,
+ "learning_rate": 0.00016527696788355714,
+ "loss": 1.1993,
+ "step": 820
+ },
+ {
+ "epoch": 0.821230971210653,
+ "grad_norm": 0.5402149558067322,
+ "learning_rate": 0.00016519752149981397,
+ "loss": 1.2921,
+ "step": 821
+ },
+ {
+ "epoch": 0.8222312525397768,
+ "grad_norm": 0.6050311326980591,
+ "learning_rate": 0.0001651180034757856,
+ "loss": 1.59,
+ "step": 822
+ },
+ {
+ "epoch": 0.8232315338689006,
+ "grad_norm": 0.6215486526489258,
+ "learning_rate": 0.00016503841389884798,
+ "loss": 1.4562,
+ "step": 823
+ },
+ {
+ "epoch": 0.8242318151980245,
+ "grad_norm": 0.6507789492607117,
+ "learning_rate": 0.00016495875285645566,
+ "loss": 1.349,
+ "step": 824
+ },
+ {
+ "epoch": 0.8252320965271482,
+ "grad_norm": 0.5273147225379944,
+ "learning_rate": 0.00016487902043614173,
+ "loss": 1.4016,
+ "step": 825
+ },
+ {
+ "epoch": 0.8262323778562721,
+ "grad_norm": 0.579987645149231,
+ "learning_rate": 0.0001647992167255177,
+ "loss": 1.4077,
+ "step": 826
+ },
+ {
+ "epoch": 0.8272326591853959,
+ "grad_norm": 0.5068405270576477,
+ "learning_rate": 0.0001647193418122734,
+ "loss": 1.5075,
+ "step": 827
+ },
+ {
+ "epoch": 0.8282329405145197,
+ "grad_norm": 0.519982099533081,
+ "learning_rate": 0.00016463939578417692,
+ "loss": 1.2721,
+ "step": 828
+ },
+ {
+ "epoch": 0.8292332218436436,
+ "grad_norm": 0.5181561708450317,
+ "learning_rate": 0.0001645593787290745,
+ "loss": 1.2299,
+ "step": 829
+ },
+ {
+ "epoch": 0.8302335031727673,
+ "grad_norm": 0.47413337230682373,
+ "learning_rate": 0.0001644792907348904,
+ "loss": 1.2462,
+ "step": 830
+ },
+ {
+ "epoch": 0.8312337845018911,
+ "grad_norm": 0.5426570773124695,
+ "learning_rate": 0.00016439913188962685,
+ "loss": 1.4496,
+ "step": 831
+ },
+ {
+ "epoch": 0.832234065831015,
+ "grad_norm": 0.5744379758834839,
+ "learning_rate": 0.0001643189022813639,
+ "loss": 1.3284,
+ "step": 832
+ },
+ {
+ "epoch": 0.8332343471601388,
+ "grad_norm": 0.49693235754966736,
+ "learning_rate": 0.0001642386019982594,
+ "loss": 1.4082,
+ "step": 833
+ },
+ {
+ "epoch": 0.8342346284892626,
+ "grad_norm": 0.5346773862838745,
+ "learning_rate": 0.00016415823112854883,
+ "loss": 1.4238,
+ "step": 834
+ },
+ {
+ "epoch": 0.8352349098183864,
+ "grad_norm": 0.6201802492141724,
+ "learning_rate": 0.00016407778976054526,
+ "loss": 1.3288,
+ "step": 835
+ },
+ {
+ "epoch": 0.8362351911475102,
+ "grad_norm": 0.5161807537078857,
+ "learning_rate": 0.0001639972779826392,
+ "loss": 1.3798,
+ "step": 836
+ },
+ {
+ "epoch": 0.8372354724766341,
+ "grad_norm": 0.4670160412788391,
+ "learning_rate": 0.0001639166958832985,
+ "loss": 1.3765,
+ "step": 837
+ },
+ {
+ "epoch": 0.8382357538057579,
+ "grad_norm": 0.6492543816566467,
+ "learning_rate": 0.00016383604355106837,
+ "loss": 1.5277,
+ "step": 838
+ },
+ {
+ "epoch": 0.8392360351348817,
+ "grad_norm": 0.5766328573226929,
+ "learning_rate": 0.00016375532107457108,
+ "loss": 1.2481,
+ "step": 839
+ },
+ {
+ "epoch": 0.8402363164640055,
+ "grad_norm": 0.6431072950363159,
+ "learning_rate": 0.00016367452854250603,
+ "loss": 1.2755,
+ "step": 840
+ },
+ {
+ "epoch": 0.8412365977931293,
+ "grad_norm": 0.5121828317642212,
+ "learning_rate": 0.00016359366604364972,
+ "loss": 1.2927,
+ "step": 841
+ },
+ {
+ "epoch": 0.8422368791222531,
+ "grad_norm": 0.5222392678260803,
+ "learning_rate": 0.00016351273366685526,
+ "loss": 1.2626,
+ "step": 842
+ },
+ {
+ "epoch": 0.843237160451377,
+ "grad_norm": 0.5536903142929077,
+ "learning_rate": 0.00016343173150105278,
+ "loss": 1.1892,
+ "step": 843
+ },
+ {
+ "epoch": 0.8442374417805008,
+ "grad_norm": 0.5569381713867188,
+ "learning_rate": 0.00016335065963524897,
+ "loss": 1.4263,
+ "step": 844
+ },
+ {
+ "epoch": 0.8452377231096245,
+ "grad_norm": 0.6490715742111206,
+ "learning_rate": 0.0001632695181585272,
+ "loss": 1.452,
+ "step": 845
+ },
+ {
+ "epoch": 0.8462380044387484,
+ "grad_norm": 0.5965350270271301,
+ "learning_rate": 0.00016318830716004722,
+ "loss": 1.4189,
+ "step": 846
+ },
+ {
+ "epoch": 0.8472382857678722,
+ "grad_norm": 0.45904603600502014,
+ "learning_rate": 0.00016310702672904528,
+ "loss": 1.4024,
+ "step": 847
+ },
+ {
+ "epoch": 0.8482385670969961,
+ "grad_norm": 0.4320334494113922,
+ "learning_rate": 0.00016302567695483382,
+ "loss": 1.2105,
+ "step": 848
+ },
+ {
+ "epoch": 0.8492388484261199,
+ "grad_norm": 0.527032196521759,
+ "learning_rate": 0.0001629442579268016,
+ "loss": 1.1996,
+ "step": 849
+ },
+ {
+ "epoch": 0.8502391297552436,
+ "grad_norm": 0.6317036747932434,
+ "learning_rate": 0.00016286276973441333,
+ "loss": 1.4811,
+ "step": 850
+ },
+ {
+ "epoch": 0.8512394110843675,
+ "grad_norm": 0.5726277828216553,
+ "learning_rate": 0.00016278121246720987,
+ "loss": 1.3249,
+ "step": 851
+ },
+ {
+ "epoch": 0.8522396924134913,
+ "grad_norm": 0.4624577462673187,
+ "learning_rate": 0.00016269958621480788,
+ "loss": 1.3291,
+ "step": 852
+ },
+ {
+ "epoch": 0.8532399737426151,
+ "grad_norm": 0.5774461627006531,
+ "learning_rate": 0.0001626178910668998,
+ "loss": 1.2891,
+ "step": 853
+ },
+ {
+ "epoch": 0.854240255071739,
+ "grad_norm": 0.503584086894989,
+ "learning_rate": 0.00016253612711325386,
+ "loss": 1.3048,
+ "step": 854
+ },
+ {
+ "epoch": 0.8552405364008627,
+ "grad_norm": 0.4560583233833313,
+ "learning_rate": 0.0001624542944437139,
+ "loss": 1.2658,
+ "step": 855
+ },
+ {
+ "epoch": 0.8562408177299866,
+ "grad_norm": 0.49611610174179077,
+ "learning_rate": 0.00016237239314819917,
+ "loss": 1.1017,
+ "step": 856
+ },
+ {
+ "epoch": 0.8572410990591104,
+ "grad_norm": 0.5600405931472778,
+ "learning_rate": 0.0001622904233167044,
+ "loss": 1.3274,
+ "step": 857
+ },
+ {
+ "epoch": 0.8582413803882342,
+ "grad_norm": 0.5849353075027466,
+ "learning_rate": 0.0001622083850392996,
+ "loss": 1.274,
+ "step": 858
+ },
+ {
+ "epoch": 0.859241661717358,
+ "grad_norm": 0.5781377553939819,
+ "learning_rate": 0.00016212627840613003,
+ "loss": 1.4157,
+ "step": 859
+ },
+ {
+ "epoch": 0.8602419430464818,
+ "grad_norm": 0.4908173680305481,
+ "learning_rate": 0.000162044103507416,
+ "loss": 1.3,
+ "step": 860
+ },
+ {
+ "epoch": 0.8612422243756056,
+ "grad_norm": 0.5844553112983704,
+ "learning_rate": 0.00016196186043345288,
+ "loss": 1.2325,
+ "step": 861
+ },
+ {
+ "epoch": 0.8622425057047295,
+ "grad_norm": 0.5381117463111877,
+ "learning_rate": 0.00016187954927461093,
+ "loss": 1.41,
+ "step": 862
+ },
+ {
+ "epoch": 0.8632427870338533,
+ "grad_norm": 0.5468165278434753,
+ "learning_rate": 0.00016179717012133521,
+ "loss": 1.4272,
+ "step": 863
+ },
+ {
+ "epoch": 0.864243068362977,
+ "grad_norm": 0.5702970027923584,
+ "learning_rate": 0.00016171472306414554,
+ "loss": 1.3624,
+ "step": 864
+ },
+ {
+ "epoch": 0.8652433496921009,
+ "grad_norm": 0.5430637001991272,
+ "learning_rate": 0.00016163220819363628,
+ "loss": 1.2555,
+ "step": 865
+ },
+ {
+ "epoch": 0.8662436310212247,
+ "grad_norm": 0.5266844034194946,
+ "learning_rate": 0.00016154962560047643,
+ "loss": 1.3743,
+ "step": 866
+ },
+ {
+ "epoch": 0.8672439123503486,
+ "grad_norm": 0.5201333165168762,
+ "learning_rate": 0.00016146697537540924,
+ "loss": 1.3959,
+ "step": 867
+ },
+ {
+ "epoch": 0.8682441936794724,
+ "grad_norm": 0.44362199306488037,
+ "learning_rate": 0.0001613842576092524,
+ "loss": 1.2661,
+ "step": 868
+ },
+ {
+ "epoch": 0.8692444750085961,
+ "grad_norm": 0.5465226769447327,
+ "learning_rate": 0.00016130147239289778,
+ "loss": 1.3688,
+ "step": 869
+ },
+ {
+ "epoch": 0.87024475633772,
+ "grad_norm": 0.5353460907936096,
+ "learning_rate": 0.00016121861981731135,
+ "loss": 1.2327,
+ "step": 870
+ },
+ {
+ "epoch": 0.8712450376668438,
+ "grad_norm": 0.5463739633560181,
+ "learning_rate": 0.00016113569997353312,
+ "loss": 1.2994,
+ "step": 871
+ },
+ {
+ "epoch": 0.8722453189959676,
+ "grad_norm": 0.5219647288322449,
+ "learning_rate": 0.000161052712952677,
+ "loss": 1.3916,
+ "step": 872
+ },
+ {
+ "epoch": 0.8732456003250915,
+ "grad_norm": 0.4675636887550354,
+ "learning_rate": 0.0001609696588459307,
+ "loss": 1.2786,
+ "step": 873
+ },
+ {
+ "epoch": 0.8742458816542152,
+ "grad_norm": 0.48863986134529114,
+ "learning_rate": 0.00016088653774455568,
+ "loss": 1.1762,
+ "step": 874
+ },
+ {
+ "epoch": 0.875246162983339,
+ "grad_norm": 0.48759785294532776,
+ "learning_rate": 0.00016080334973988695,
+ "loss": 1.2107,
+ "step": 875
+ },
+ {
+ "epoch": 0.8762464443124629,
+ "grad_norm": 0.7353807687759399,
+ "learning_rate": 0.00016072009492333318,
+ "loss": 1.4855,
+ "step": 876
+ },
+ {
+ "epoch": 0.8772467256415867,
+ "grad_norm": 0.4878953993320465,
+ "learning_rate": 0.0001606367733863763,
+ "loss": 1.2343,
+ "step": 877
+ },
+ {
+ "epoch": 0.8782470069707106,
+ "grad_norm": 0.4764840304851532,
+ "learning_rate": 0.00016055338522057158,
+ "loss": 1.3159,
+ "step": 878
+ },
+ {
+ "epoch": 0.8792472882998343,
+ "grad_norm": 0.5289160013198853,
+ "learning_rate": 0.00016046993051754756,
+ "loss": 1.3298,
+ "step": 879
+ },
+ {
+ "epoch": 0.8802475696289581,
+ "grad_norm": 0.5421459078788757,
+ "learning_rate": 0.00016038640936900586,
+ "loss": 1.4081,
+ "step": 880
+ },
+ {
+ "epoch": 0.881247850958082,
+ "grad_norm": 0.5096681118011475,
+ "learning_rate": 0.00016030282186672116,
+ "loss": 1.2406,
+ "step": 881
+ },
+ {
+ "epoch": 0.8822481322872058,
+ "grad_norm": 0.5783627033233643,
+ "learning_rate": 0.00016021916810254097,
+ "loss": 1.3505,
+ "step": 882
+ },
+ {
+ "epoch": 0.8832484136163296,
+ "grad_norm": 0.5718142986297607,
+ "learning_rate": 0.00016013544816838565,
+ "loss": 1.4106,
+ "step": 883
+ },
+ {
+ "epoch": 0.8842486949454534,
+ "grad_norm": 0.551607072353363,
+ "learning_rate": 0.00016005166215624827,
+ "loss": 1.3474,
+ "step": 884
+ },
+ {
+ "epoch": 0.8852489762745772,
+ "grad_norm": 0.5464247465133667,
+ "learning_rate": 0.0001599678101581945,
+ "loss": 1.4443,
+ "step": 885
+ },
+ {
+ "epoch": 0.886249257603701,
+ "grad_norm": 0.5075456500053406,
+ "learning_rate": 0.00015988389226636253,
+ "loss": 1.4919,
+ "step": 886
+ },
+ {
+ "epoch": 0.8872495389328249,
+ "grad_norm": 0.48557186126708984,
+ "learning_rate": 0.00015979990857296295,
+ "loss": 1.4225,
+ "step": 887
+ },
+ {
+ "epoch": 0.8882498202619487,
+ "grad_norm": 0.5385611653327942,
+ "learning_rate": 0.00015971585917027862,
+ "loss": 1.2937,
+ "step": 888
+ },
+ {
+ "epoch": 0.8892501015910725,
+ "grad_norm": 0.6477749943733215,
+ "learning_rate": 0.00015963174415066468,
+ "loss": 1.5628,
+ "step": 889
+ },
+ {
+ "epoch": 0.8902503829201963,
+ "grad_norm": 0.6205973029136658,
+ "learning_rate": 0.0001595475636065483,
+ "loss": 1.4902,
+ "step": 890
+ },
+ {
+ "epoch": 0.8912506642493201,
+ "grad_norm": 0.45717301964759827,
+ "learning_rate": 0.00015946331763042867,
+ "loss": 1.1998,
+ "step": 891
+ },
+ {
+ "epoch": 0.892250945578444,
+ "grad_norm": 0.5279855132102966,
+ "learning_rate": 0.00015937900631487686,
+ "loss": 1.0668,
+ "step": 892
+ },
+ {
+ "epoch": 0.8932512269075678,
+ "grad_norm": 0.5207269787788391,
+ "learning_rate": 0.00015929462975253585,
+ "loss": 1.2774,
+ "step": 893
+ },
+ {
+ "epoch": 0.8942515082366915,
+ "grad_norm": 0.5200834274291992,
+ "learning_rate": 0.00015921018803612014,
+ "loss": 1.4316,
+ "step": 894
+ },
+ {
+ "epoch": 0.8952517895658154,
+ "grad_norm": 0.48317649960517883,
+ "learning_rate": 0.0001591256812584159,
+ "loss": 1.4101,
+ "step": 895
+ },
+ {
+ "epoch": 0.8962520708949392,
+ "grad_norm": 0.475483775138855,
+ "learning_rate": 0.00015904110951228082,
+ "loss": 1.2011,
+ "step": 896
+ },
+ {
+ "epoch": 0.897252352224063,
+ "grad_norm": 0.6542660593986511,
+ "learning_rate": 0.00015895647289064396,
+ "loss": 1.56,
+ "step": 897
+ },
+ {
+ "epoch": 0.8982526335531869,
+ "grad_norm": 0.5154829621315002,
+ "learning_rate": 0.00015887177148650564,
+ "loss": 1.3748,
+ "step": 898
+ },
+ {
+ "epoch": 0.8992529148823106,
+ "grad_norm": 0.5744799375534058,
+ "learning_rate": 0.0001587870053929374,
+ "loss": 1.4072,
+ "step": 899
+ },
+ {
+ "epoch": 0.9002531962114345,
+ "grad_norm": 0.4835909307003021,
+ "learning_rate": 0.00015870217470308188,
+ "loss": 1.3037,
+ "step": 900
+ },
+ {
+ "epoch": 0.9012534775405583,
+ "grad_norm": 0.5292366743087769,
+ "learning_rate": 0.0001586172795101526,
+ "loss": 1.2395,
+ "step": 901
+ },
+ {
+ "epoch": 0.9022537588696821,
+ "grad_norm": 0.5905430912971497,
+ "learning_rate": 0.00015853231990743406,
+ "loss": 1.29,
+ "step": 902
+ },
+ {
+ "epoch": 0.903254040198806,
+ "grad_norm": 0.4918007254600525,
+ "learning_rate": 0.0001584472959882815,
+ "loss": 1.2593,
+ "step": 903
+ },
+ {
+ "epoch": 0.9042543215279297,
+ "grad_norm": 0.4735652208328247,
+ "learning_rate": 0.00015836220784612085,
+ "loss": 1.1669,
+ "step": 904
+ },
+ {
+ "epoch": 0.9052546028570535,
+ "grad_norm": 0.6272550821304321,
+ "learning_rate": 0.00015827705557444852,
+ "loss": 1.3692,
+ "step": 905
+ },
+ {
+ "epoch": 0.9062548841861774,
+ "grad_norm": 0.5333564877510071,
+ "learning_rate": 0.00015819183926683153,
+ "loss": 1.3672,
+ "step": 906
+ },
+ {
+ "epoch": 0.9072551655153012,
+ "grad_norm": 0.44029948115348816,
+ "learning_rate": 0.00015810655901690715,
+ "loss": 1.2124,
+ "step": 907
+ },
+ {
+ "epoch": 0.9082554468444249,
+ "grad_norm": 0.5636379718780518,
+ "learning_rate": 0.00015802121491838297,
+ "loss": 1.3507,
+ "step": 908
+ },
+ {
+ "epoch": 0.9092557281735488,
+ "grad_norm": 0.4394778907299042,
+ "learning_rate": 0.0001579358070650367,
+ "loss": 1.3159,
+ "step": 909
+ },
+ {
+ "epoch": 0.9102560095026726,
+ "grad_norm": 0.5382723212242126,
+ "learning_rate": 0.00015785033555071616,
+ "loss": 1.3733,
+ "step": 910
+ },
+ {
+ "epoch": 0.9112562908317965,
+ "grad_norm": 0.5251659750938416,
+ "learning_rate": 0.00015776480046933905,
+ "loss": 1.2253,
+ "step": 911
+ },
+ {
+ "epoch": 0.9122565721609203,
+ "grad_norm": 0.4791383743286133,
+ "learning_rate": 0.000157679201914893,
+ "loss": 1.2341,
+ "step": 912
+ },
+ {
+ "epoch": 0.913256853490044,
+ "grad_norm": 0.5058613419532776,
+ "learning_rate": 0.00015759353998143528,
+ "loss": 1.2717,
+ "step": 913
+ },
+ {
+ "epoch": 0.9142571348191679,
+ "grad_norm": 0.46837320923805237,
+ "learning_rate": 0.00015750781476309288,
+ "loss": 1.2484,
+ "step": 914
+ },
+ {
+ "epoch": 0.9152574161482917,
+ "grad_norm": 0.524444580078125,
+ "learning_rate": 0.00015742202635406235,
+ "loss": 1.5512,
+ "step": 915
+ },
+ {
+ "epoch": 0.9162576974774155,
+ "grad_norm": 0.6169744729995728,
+ "learning_rate": 0.00015733617484860963,
+ "loss": 1.271,
+ "step": 916
+ },
+ {
+ "epoch": 0.9172579788065394,
+ "grad_norm": 0.48883670568466187,
+ "learning_rate": 0.00015725026034106996,
+ "loss": 1.4779,
+ "step": 917
+ },
+ {
+ "epoch": 0.9182582601356631,
+ "grad_norm": 0.5408645272254944,
+ "learning_rate": 0.00015716428292584787,
+ "loss": 1.3574,
+ "step": 918
+ },
+ {
+ "epoch": 0.919258541464787,
+ "grad_norm": 0.5622221231460571,
+ "learning_rate": 0.00015707824269741702,
+ "loss": 1.2146,
+ "step": 919
+ },
+ {
+ "epoch": 0.9202588227939108,
+ "grad_norm": 0.477328896522522,
+ "learning_rate": 0.00015699213975031996,
+ "loss": 1.162,
+ "step": 920
+ },
+ {
+ "epoch": 0.9212591041230346,
+ "grad_norm": 0.503027081489563,
+ "learning_rate": 0.0001569059741791684,
+ "loss": 1.1674,
+ "step": 921
+ },
+ {
+ "epoch": 0.9222593854521585,
+ "grad_norm": 0.5951637625694275,
+ "learning_rate": 0.0001568197460786426,
+ "loss": 1.3737,
+ "step": 922
+ },
+ {
+ "epoch": 0.9232596667812822,
+ "grad_norm": 0.5276626348495483,
+ "learning_rate": 0.0001567334555434917,
+ "loss": 1.2494,
+ "step": 923
+ },
+ {
+ "epoch": 0.924259948110406,
+ "grad_norm": 0.6354761123657227,
+ "learning_rate": 0.0001566471026685334,
+ "loss": 1.2052,
+ "step": 924
+ },
+ {
+ "epoch": 0.9252602294395299,
+ "grad_norm": 0.4227287471294403,
+ "learning_rate": 0.00015656068754865387,
+ "loss": 1.1446,
+ "step": 925
+ },
+ {
+ "epoch": 0.9262605107686537,
+ "grad_norm": 0.5290839076042175,
+ "learning_rate": 0.00015647421027880772,
+ "loss": 1.2057,
+ "step": 926
+ },
+ {
+ "epoch": 0.9272607920977775,
+ "grad_norm": 0.4961225986480713,
+ "learning_rate": 0.0001563876709540178,
+ "loss": 1.2788,
+ "step": 927
+ },
+ {
+ "epoch": 0.9282610734269013,
+ "grad_norm": 0.5095213651657104,
+ "learning_rate": 0.0001563010696693752,
+ "loss": 1.2751,
+ "step": 928
+ },
+ {
+ "epoch": 0.9292613547560251,
+ "grad_norm": 0.5027223825454712,
+ "learning_rate": 0.00015621440652003907,
+ "loss": 1.3653,
+ "step": 929
+ },
+ {
+ "epoch": 0.930261636085149,
+ "grad_norm": 0.49251896142959595,
+ "learning_rate": 0.00015612768160123652,
+ "loss": 1.1556,
+ "step": 930
+ },
+ {
+ "epoch": 0.9312619174142728,
+ "grad_norm": 0.5187139511108398,
+ "learning_rate": 0.00015604089500826257,
+ "loss": 1.3623,
+ "step": 931
+ },
+ {
+ "epoch": 0.9322621987433966,
+ "grad_norm": 0.5004428029060364,
+ "learning_rate": 0.00015595404683648,
+ "loss": 1.185,
+ "step": 932
+ },
+ {
+ "epoch": 0.9332624800725204,
+ "grad_norm": 0.5750531554222107,
+ "learning_rate": 0.00015586713718131922,
+ "loss": 1.2999,
+ "step": 933
+ },
+ {
+ "epoch": 0.9342627614016442,
+ "grad_norm": 0.482732355594635,
+ "learning_rate": 0.0001557801661382782,
+ "loss": 1.2635,
+ "step": 934
+ },
+ {
+ "epoch": 0.935263042730768,
+ "grad_norm": 0.47854143381118774,
+ "learning_rate": 0.00015569313380292248,
+ "loss": 1.2833,
+ "step": 935
+ },
+ {
+ "epoch": 0.9362633240598919,
+ "grad_norm": 0.49382665753364563,
+ "learning_rate": 0.00015560604027088477,
+ "loss": 1.2327,
+ "step": 936
+ },
+ {
+ "epoch": 0.9372636053890157,
+ "grad_norm": 0.5009885430335999,
+ "learning_rate": 0.00015551888563786515,
+ "loss": 1.2967,
+ "step": 937
+ },
+ {
+ "epoch": 0.9382638867181394,
+ "grad_norm": 0.5012707114219666,
+ "learning_rate": 0.00015543166999963076,
+ "loss": 1.3231,
+ "step": 938
+ },
+ {
+ "epoch": 0.9392641680472633,
+ "grad_norm": 0.6908506751060486,
+ "learning_rate": 0.0001553443934520159,
+ "loss": 1.4055,
+ "step": 939
+ },
+ {
+ "epoch": 0.9402644493763871,
+ "grad_norm": 0.7104817032814026,
+ "learning_rate": 0.00015525705609092157,
+ "loss": 1.3435,
+ "step": 940
+ },
+ {
+ "epoch": 0.941264730705511,
+ "grad_norm": 0.49263522028923035,
+ "learning_rate": 0.00015516965801231586,
+ "loss": 1.2259,
+ "step": 941
+ },
+ {
+ "epoch": 0.9422650120346348,
+ "grad_norm": 0.5337693691253662,
+ "learning_rate": 0.0001550821993122334,
+ "loss": 1.2863,
+ "step": 942
+ },
+ {
+ "epoch": 0.9432652933637585,
+ "grad_norm": 0.5506749153137207,
+ "learning_rate": 0.0001549946800867755,
+ "loss": 1.4061,
+ "step": 943
+ },
+ {
+ "epoch": 0.9442655746928824,
+ "grad_norm": 0.5121364593505859,
+ "learning_rate": 0.00015490710043210997,
+ "loss": 1.3567,
+ "step": 944
+ },
+ {
+ "epoch": 0.9452658560220062,
+ "grad_norm": 0.5326678156852722,
+ "learning_rate": 0.00015481946044447099,
+ "loss": 1.2719,
+ "step": 945
+ },
+ {
+ "epoch": 0.94626613735113,
+ "grad_norm": 0.6023722290992737,
+ "learning_rate": 0.00015473176022015906,
+ "loss": 1.1512,
+ "step": 946
+ },
+ {
+ "epoch": 0.9472664186802539,
+ "grad_norm": 0.4953387975692749,
+ "learning_rate": 0.0001546439998555409,
+ "loss": 1.556,
+ "step": 947
+ },
+ {
+ "epoch": 0.9482667000093776,
+ "grad_norm": 0.5187799334526062,
+ "learning_rate": 0.0001545561794470492,
+ "loss": 1.279,
+ "step": 948
+ },
+ {
+ "epoch": 0.9492669813385014,
+ "grad_norm": 0.5788894295692444,
+ "learning_rate": 0.00015446829909118275,
+ "loss": 1.3246,
+ "step": 949
+ },
+ {
+ "epoch": 0.9502672626676253,
+ "grad_norm": 0.5551681518554688,
+ "learning_rate": 0.00015438035888450623,
+ "loss": 1.2231,
+ "step": 950
+ },
+ {
+ "epoch": 0.9512675439967491,
+ "grad_norm": 0.4898390471935272,
+ "learning_rate": 0.00015429235892364994,
+ "loss": 1.2036,
+ "step": 951
+ },
+ {
+ "epoch": 0.952267825325873,
+ "grad_norm": 0.5427507162094116,
+ "learning_rate": 0.00015420429930530996,
+ "loss": 1.3614,
+ "step": 952
+ },
+ {
+ "epoch": 0.9532681066549967,
+ "grad_norm": 0.557054340839386,
+ "learning_rate": 0.00015411618012624786,
+ "loss": 1.4249,
+ "step": 953
+ },
+ {
+ "epoch": 0.9542683879841205,
+ "grad_norm": 0.5793543457984924,
+ "learning_rate": 0.00015402800148329071,
+ "loss": 1.4341,
+ "step": 954
+ },
+ {
+ "epoch": 0.9552686693132444,
+ "grad_norm": 0.5993456244468689,
+ "learning_rate": 0.00015393976347333088,
+ "loss": 1.0259,
+ "step": 955
+ },
+ {
+ "epoch": 0.9562689506423682,
+ "grad_norm": 0.554904580116272,
+ "learning_rate": 0.00015385146619332596,
+ "loss": 1.3558,
+ "step": 956
+ },
+ {
+ "epoch": 0.9572692319714919,
+ "grad_norm": 0.5488478541374207,
+ "learning_rate": 0.00015376310974029873,
+ "loss": 1.358,
+ "step": 957
+ },
+ {
+ "epoch": 0.9582695133006158,
+ "grad_norm": 0.5108879208564758,
+ "learning_rate": 0.00015367469421133695,
+ "loss": 1.3865,
+ "step": 958
+ },
+ {
+ "epoch": 0.9592697946297396,
+ "grad_norm": 0.4606814682483673,
+ "learning_rate": 0.00015358621970359325,
+ "loss": 1.2055,
+ "step": 959
+ },
+ {
+ "epoch": 0.9602700759588634,
+ "grad_norm": 0.4974004328250885,
+ "learning_rate": 0.00015349768631428519,
+ "loss": 1.2541,
+ "step": 960
+ },
+ {
+ "epoch": 0.9612703572879873,
+ "grad_norm": 0.5107241272926331,
+ "learning_rate": 0.00015340909414069488,
+ "loss": 1.1624,
+ "step": 961
+ },
+ {
+ "epoch": 0.962270638617111,
+ "grad_norm": 0.5587212443351746,
+ "learning_rate": 0.00015332044328016914,
+ "loss": 1.349,
+ "step": 962
+ },
+ {
+ "epoch": 0.9632709199462349,
+ "grad_norm": 0.5209497809410095,
+ "learning_rate": 0.0001532317338301192,
+ "loss": 1.3695,
+ "step": 963
+ },
+ {
+ "epoch": 0.9642712012753587,
+ "grad_norm": 0.4985620677471161,
+ "learning_rate": 0.00015314296588802076,
+ "loss": 1.4597,
+ "step": 964
+ },
+ {
+ "epoch": 0.9652714826044825,
+ "grad_norm": 0.5065100789070129,
+ "learning_rate": 0.00015305413955141365,
+ "loss": 1.4225,
+ "step": 965
+ },
+ {
+ "epoch": 0.9662717639336064,
+ "grad_norm": 0.5079792737960815,
+ "learning_rate": 0.00015296525491790205,
+ "loss": 1.057,
+ "step": 966
+ },
+ {
+ "epoch": 0.9672720452627301,
+ "grad_norm": 0.4673600196838379,
+ "learning_rate": 0.00015287631208515406,
+ "loss": 1.2531,
+ "step": 967
+ },
+ {
+ "epoch": 0.9682723265918539,
+ "grad_norm": 0.5309945344924927,
+ "learning_rate": 0.00015278731115090171,
+ "loss": 1.374,
+ "step": 968
+ },
+ {
+ "epoch": 0.9692726079209778,
+ "grad_norm": 0.4792092442512512,
+ "learning_rate": 0.00015269825221294098,
+ "loss": 1.3018,
+ "step": 969
+ },
+ {
+ "epoch": 0.9702728892501016,
+ "grad_norm": 0.5222868323326111,
+ "learning_rate": 0.00015260913536913154,
+ "loss": 1.4063,
+ "step": 970
+ },
+ {
+ "epoch": 0.9712731705792254,
+ "grad_norm": 0.5373417139053345,
+ "learning_rate": 0.00015251996071739664,
+ "loss": 1.2183,
+ "step": 971
+ },
+ {
+ "epoch": 0.9722734519083492,
+ "grad_norm": 0.5624721050262451,
+ "learning_rate": 0.00015243072835572318,
+ "loss": 1.2696,
+ "step": 972
+ },
+ {
+ "epoch": 0.973273733237473,
+ "grad_norm": 0.46938082575798035,
+ "learning_rate": 0.0001523414383821613,
+ "loss": 1.3544,
+ "step": 973
+ },
+ {
+ "epoch": 0.9742740145665969,
+ "grad_norm": 0.45348694920539856,
+ "learning_rate": 0.00015225209089482462,
+ "loss": 1.2078,
+ "step": 974
+ },
+ {
+ "epoch": 0.9752742958957207,
+ "grad_norm": 0.48000606894493103,
+ "learning_rate": 0.0001521626859918898,
+ "loss": 1.1914,
+ "step": 975
+ },
+ {
+ "epoch": 0.9762745772248445,
+ "grad_norm": 0.5106796622276306,
+ "learning_rate": 0.00015207322377159668,
+ "loss": 1.3249,
+ "step": 976
+ },
+ {
+ "epoch": 0.9772748585539683,
+ "grad_norm": 0.49865373969078064,
+ "learning_rate": 0.00015198370433224805,
+ "loss": 1.2876,
+ "step": 977
+ },
+ {
+ "epoch": 0.9782751398830921,
+ "grad_norm": 0.5271755456924438,
+ "learning_rate": 0.00015189412777220958,
+ "loss": 1.3049,
+ "step": 978
+ },
+ {
+ "epoch": 0.9792754212122159,
+ "grad_norm": 0.49824708700180054,
+ "learning_rate": 0.00015180449418990976,
+ "loss": 1.1614,
+ "step": 979
+ },
+ {
+ "epoch": 0.9802757025413398,
+ "grad_norm": 0.7327549457550049,
+ "learning_rate": 0.00015171480368383964,
+ "loss": 1.2923,
+ "step": 980
+ },
+ {
+ "epoch": 0.9812759838704636,
+ "grad_norm": 0.5170425176620483,
+ "learning_rate": 0.00015162505635255287,
+ "loss": 1.3097,
+ "step": 981
+ },
+ {
+ "epoch": 0.9822762651995874,
+ "grad_norm": 0.47041526436805725,
+ "learning_rate": 0.00015153525229466555,
+ "loss": 1.3508,
+ "step": 982
+ },
+ {
+ "epoch": 0.9832765465287112,
+ "grad_norm": 0.4670693278312683,
+ "learning_rate": 0.00015144539160885613,
+ "loss": 1.3974,
+ "step": 983
+ },
+ {
+ "epoch": 0.984276827857835,
+ "grad_norm": 0.5745754837989807,
+ "learning_rate": 0.00015135547439386516,
+ "loss": 1.2977,
+ "step": 984
+ },
+ {
+ "epoch": 0.9852771091869589,
+ "grad_norm": 0.5845474004745483,
+ "learning_rate": 0.0001512655007484955,
+ "loss": 1.3384,
+ "step": 985
+ },
+ {
+ "epoch": 0.9862773905160827,
+ "grad_norm": 0.5627439618110657,
+ "learning_rate": 0.00015117547077161185,
+ "loss": 1.1756,
+ "step": 986
+ },
+ {
+ "epoch": 0.9872776718452064,
+ "grad_norm": 0.6411226987838745,
+ "learning_rate": 0.0001510853845621409,
+ "loss": 1.3441,
+ "step": 987
+ },
+ {
+ "epoch": 0.9882779531743303,
+ "grad_norm": 0.545659601688385,
+ "learning_rate": 0.00015099524221907107,
+ "loss": 1.3766,
+ "step": 988
+ },
+ {
+ "epoch": 0.9892782345034541,
+ "grad_norm": 0.5058498382568359,
+ "learning_rate": 0.0001509050438414525,
+ "loss": 1.3171,
+ "step": 989
+ },
+ {
+ "epoch": 0.9902785158325779,
+ "grad_norm": 0.6247567534446716,
+ "learning_rate": 0.00015081478952839693,
+ "loss": 1.2141,
+ "step": 990
+ },
+ {
+ "epoch": 0.9912787971617018,
+ "grad_norm": 0.5492308139801025,
+ "learning_rate": 0.00015072447937907753,
+ "loss": 1.1626,
+ "step": 991
+ },
+ {
+ "epoch": 0.9922790784908255,
+ "grad_norm": 0.4795534908771515,
+ "learning_rate": 0.00015063411349272877,
+ "loss": 1.218,
+ "step": 992
+ },
+ {
+ "epoch": 0.9932793598199494,
+ "grad_norm": 0.5527793169021606,
+ "learning_rate": 0.00015054369196864644,
+ "loss": 1.3816,
+ "step": 993
+ },
+ {
+ "epoch": 0.9942796411490732,
+ "grad_norm": 0.5297475457191467,
+ "learning_rate": 0.00015045321490618748,
+ "loss": 1.2515,
+ "step": 994
+ },
+ {
+ "epoch": 0.995279922478197,
+ "grad_norm": 0.518803596496582,
+ "learning_rate": 0.00015036268240476978,
+ "loss": 1.3631,
+ "step": 995
+ },
+ {
+ "epoch": 0.9962802038073209,
+ "grad_norm": 0.47196391224861145,
+ "learning_rate": 0.00015027209456387218,
+ "loss": 1.0932,
+ "step": 996
+ },
+ {
+ "epoch": 0.9972804851364446,
+ "grad_norm": 0.5369086861610413,
+ "learning_rate": 0.00015018145148303438,
+ "loss": 1.1181,
+ "step": 997
+ },
+ {
+ "epoch": 0.9982807664655684,
+ "grad_norm": 0.5940788388252258,
+ "learning_rate": 0.00015009075326185667,
+ "loss": 1.561,
+ "step": 998
+ },
+ {
+ "epoch": 0.9992810477946923,
+ "grad_norm": 0.5340734124183655,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 1.2909,
+ "step": 999
+ },
+ {
+ "epoch": 1.0002813291238162,
+ "grad_norm": 0.5133704543113708,
+ "learning_rate": 0.00014990919179718584,
+ "loss": 1.0441,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 2997,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 6.479774965540454e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1000/training_args.bin b/checkpoint-1000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f5db54c03213459099040d14f85829b6aeb0666
--- /dev/null
+++ b/checkpoint-1000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb43d78443117126e44061cb7a0c1f9a5c40f27f7bf1d5cd0232587a4334407
+size 5304
diff --git a/checkpoint-1500/config.json b/checkpoint-1500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..06ec1df58f28234ccce22a5325e108ece94f0078
--- /dev/null
+++ b/checkpoint-1500/config.json
@@ -0,0 +1,34 @@
+{
+ "_name_or_path": "facebook/nllb-200-3.3B",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "M2M100ForConditionalGeneration"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 0,
+ "d_model": 2048,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 8192,
+ "decoder_layerdrop": 0,
+ "decoder_layers": 24,
+ "decoder_start_token_id": 2,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 8192,
+ "encoder_layerdrop": 0,
+ "encoder_layers": 24,
+ "eos_token_id": 2,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 200,
+ "max_position_embeddings": 1024,
+ "model_type": "m2m_100",
+ "num_hidden_layers": 24,
+ "pad_token_id": 1,
+ "scale_embedding": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.43.2",
+ "use_cache": true,
+ "vocab_size": 256206
+}
diff --git a/checkpoint-1500/generation_config.json b/checkpoint-1500/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..402a1a43d1af8c080466b8139184b4e5b7f3f47c
--- /dev/null
+++ b/checkpoint-1500/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "decoder_start_token_id": 2,
+ "eos_token_id": 2,
+ "max_length": 200,
+ "pad_token_id": 1,
+ "transformers_version": "4.43.2"
+}
diff --git a/checkpoint-1500/model-00001-of-00003.safetensors b/checkpoint-1500/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..12881085d6a64ae2320485b9c9512ddabb832717
--- /dev/null
+++ b/checkpoint-1500/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:524f1b2a3d487121b35e9413666471718655037ef32ea66ee7662de1dfe671a3
+size 4986088344
diff --git a/checkpoint-1500/model-00002-of-00003.safetensors b/checkpoint-1500/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a88f6a6869876a3e23a466ac7480e58caab2d71b
--- /dev/null
+++ b/checkpoint-1500/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61ed51810042b9ef0969b82467f35f3cea88f9d712136b7f22006b26a7f8d46e
+size 4985688360
diff --git a/checkpoint-1500/model-00003-of-00003.safetensors b/checkpoint-1500/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2a257b4309716de040f6166f281d87150f460ee6
--- /dev/null
+++ b/checkpoint-1500/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96c1468c8b1715d253c5a261e5cc744e06ed0c654f8bfaad28981b1349095126
+size 3407796744
diff --git a/checkpoint-1500/model.safetensors.index.json b/checkpoint-1500/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..66f5db79b23230955de24502c00adc6525edbdfc
--- /dev/null
+++ b/checkpoint-1500/model.safetensors.index.json
@@ -0,0 +1,1020 @@
+{
+ "metadata": {
+ "total_size": 13379452928
+ },
+ "weight_map": {
+ "model.decoder.layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.shared.weight": "model-00001-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-1500/optimizer.pt b/checkpoint-1500/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..88cbba26c5a05296c0bca72855e9e3bd9cb76677
--- /dev/null
+++ b/checkpoint-1500/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffc2d4bb97b961a20e7140ceae840fbc600e40e460ed74602abc561298dbd2eb
+size 16695613
diff --git a/checkpoint-1500/rng_state.pth b/checkpoint-1500/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..20012bcd8228e364443b82357ac00f5d6f845c81
--- /dev/null
+++ b/checkpoint-1500/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cae694e4dc4a229e80ef6dc1dd9a1cb2d63d2baac73d6b43e28fd9f1ae739fa7
+size 14244
diff --git a/checkpoint-1500/scheduler.pt b/checkpoint-1500/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2520c858524e632a2e79a9782d61ec95a50c147e
--- /dev/null
+++ b/checkpoint-1500/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a014688320d66d9abcc1740699cc308c75b18610ef38f5975f168109cf9e7822
+size 1064
diff --git a/checkpoint-1500/sentencepiece.bpe.model b/checkpoint-1500/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..dc2262d3e1d375b235eb71c24119c8e73f85d4ad
--- /dev/null
+++ b/checkpoint-1500/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
+size 4852054
diff --git a/checkpoint-1500/special_tokens_map.json b/checkpoint-1500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..770c6f4e25faf27bbc3878b806f2ecfb88c5169e
--- /dev/null
+++ b/checkpoint-1500/special_tokens_map.json
@@ -0,0 +1,255 @@
+{
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1500/tokenizer.json b/checkpoint-1500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..98050e98b98364c06d83b3f41864076220cb8408
--- /dev/null
+++ b/checkpoint-1500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b39b25b0763a1dd69dec54081fafcf10770d9f2538a3bd975a0c4be6d60a9c2
+size 17331294
diff --git a/checkpoint-1500/tokenizer_config.json b/checkpoint-1500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f1424d3657c008568198b44be241646482e7e9f2
--- /dev/null
+++ b/checkpoint-1500/tokenizer_config.json
@@ -0,0 +1,1878 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256001": {
+ "content": "ace_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256002": {
+ "content": "ace_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256003": {
+ "content": "acm_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256004": {
+ "content": "acq_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256005": {
+ "content": "aeb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256006": {
+ "content": "afr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256007": {
+ "content": "ajp_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256008": {
+ "content": "aka_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256009": {
+ "content": "amh_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256010": {
+ "content": "apc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256011": {
+ "content": "arb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256012": {
+ "content": "ars_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256013": {
+ "content": "ary_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256014": {
+ "content": "arz_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256015": {
+ "content": "asm_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256016": {
+ "content": "ast_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256017": {
+ "content": "awa_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256018": {
+ "content": "ayr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256019": {
+ "content": "azb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256020": {
+ "content": "azj_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256021": {
+ "content": "bak_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256022": {
+ "content": "bam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256023": {
+ "content": "ban_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256024": {
+ "content": "bel_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256025": {
+ "content": "bem_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256026": {
+ "content": "ben_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256027": {
+ "content": "bho_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256028": {
+ "content": "bjn_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256029": {
+ "content": "bjn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256030": {
+ "content": "bod_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256031": {
+ "content": "bos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256032": {
+ "content": "bug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256033": {
+ "content": "bul_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256034": {
+ "content": "cat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256035": {
+ "content": "ceb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256036": {
+ "content": "ces_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256037": {
+ "content": "cjk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256038": {
+ "content": "ckb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256039": {
+ "content": "crh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256040": {
+ "content": "cym_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256041": {
+ "content": "dan_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256042": {
+ "content": "deu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256043": {
+ "content": "dik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256044": {
+ "content": "dyu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256045": {
+ "content": "dzo_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256046": {
+ "content": "ell_Grek",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256047": {
+ "content": "eng_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256048": {
+ "content": "epo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256049": {
+ "content": "est_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256050": {
+ "content": "eus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256051": {
+ "content": "ewe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256052": {
+ "content": "fao_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256053": {
+ "content": "pes_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256054": {
+ "content": "fij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256055": {
+ "content": "fin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256056": {
+ "content": "fon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256057": {
+ "content": "fra_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256058": {
+ "content": "fur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256059": {
+ "content": "fuv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256060": {
+ "content": "gla_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256061": {
+ "content": "gle_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256062": {
+ "content": "glg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256063": {
+ "content": "grn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256064": {
+ "content": "guj_Gujr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256065": {
+ "content": "hat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256066": {
+ "content": "hau_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256067": {
+ "content": "heb_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256068": {
+ "content": "hin_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256069": {
+ "content": "hne_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256070": {
+ "content": "hrv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256071": {
+ "content": "hun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256072": {
+ "content": "hye_Armn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256073": {
+ "content": "ibo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256074": {
+ "content": "ilo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256075": {
+ "content": "ind_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256076": {
+ "content": "isl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256077": {
+ "content": "ita_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256078": {
+ "content": "jav_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256079": {
+ "content": "jpn_Jpan",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256080": {
+ "content": "kab_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256081": {
+ "content": "kac_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256082": {
+ "content": "kam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256083": {
+ "content": "kan_Knda",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256084": {
+ "content": "kas_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256085": {
+ "content": "kas_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256086": {
+ "content": "kat_Geor",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256087": {
+ "content": "knc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256088": {
+ "content": "knc_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256089": {
+ "content": "kaz_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256090": {
+ "content": "kbp_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256091": {
+ "content": "kea_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256092": {
+ "content": "khm_Khmr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256093": {
+ "content": "kik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256094": {
+ "content": "kin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256095": {
+ "content": "kir_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256096": {
+ "content": "kmb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256097": {
+ "content": "kon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256098": {
+ "content": "kor_Hang",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256099": {
+ "content": "kmr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256100": {
+ "content": "lao_Laoo",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256101": {
+ "content": "lvs_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256102": {
+ "content": "lij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256103": {
+ "content": "lim_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256104": {
+ "content": "lin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256105": {
+ "content": "lit_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256106": {
+ "content": "lmo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256107": {
+ "content": "ltg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256108": {
+ "content": "ltz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256109": {
+ "content": "lua_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256110": {
+ "content": "lug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256111": {
+ "content": "luo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256112": {
+ "content": "lus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256113": {
+ "content": "mag_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256114": {
+ "content": "mai_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256115": {
+ "content": "mal_Mlym",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256116": {
+ "content": "mar_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256117": {
+ "content": "min_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256118": {
+ "content": "mkd_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256119": {
+ "content": "plt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256120": {
+ "content": "mlt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256121": {
+ "content": "mni_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256122": {
+ "content": "khk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256123": {
+ "content": "mos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256124": {
+ "content": "mri_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256125": {
+ "content": "zsm_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256126": {
+ "content": "mya_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256127": {
+ "content": "nld_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256128": {
+ "content": "nno_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256129": {
+ "content": "nob_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256130": {
+ "content": "npi_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256131": {
+ "content": "nso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256132": {
+ "content": "nus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256133": {
+ "content": "nya_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256134": {
+ "content": "oci_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256135": {
+ "content": "gaz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256136": {
+ "content": "ory_Orya",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256137": {
+ "content": "pag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256138": {
+ "content": "pan_Guru",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256139": {
+ "content": "pap_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256140": {
+ "content": "pol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256141": {
+ "content": "por_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256142": {
+ "content": "prs_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256143": {
+ "content": "pbt_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256144": {
+ "content": "quy_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256145": {
+ "content": "ron_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256146": {
+ "content": "run_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256147": {
+ "content": "rus_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256148": {
+ "content": "sag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256149": {
+ "content": "san_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256150": {
+ "content": "sat_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256151": {
+ "content": "scn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256152": {
+ "content": "shn_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256153": {
+ "content": "sin_Sinh",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256154": {
+ "content": "slk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256155": {
+ "content": "slv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256156": {
+ "content": "smo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256157": {
+ "content": "sna_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256158": {
+ "content": "snd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256159": {
+ "content": "som_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256160": {
+ "content": "sot_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256161": {
+ "content": "spa_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256162": {
+ "content": "als_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256163": {
+ "content": "srd_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256164": {
+ "content": "srp_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256165": {
+ "content": "ssw_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256166": {
+ "content": "sun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256167": {
+ "content": "swe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256168": {
+ "content": "swh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256169": {
+ "content": "szl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256170": {
+ "content": "tam_Taml",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256171": {
+ "content": "tat_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256172": {
+ "content": "tel_Telu",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256173": {
+ "content": "tgk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256174": {
+ "content": "tgl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256175": {
+ "content": "tha_Thai",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256176": {
+ "content": "tir_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256177": {
+ "content": "taq_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256178": {
+ "content": "taq_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256179": {
+ "content": "tpi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256180": {
+ "content": "tsn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256181": {
+ "content": "tso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256182": {
+ "content": "tuk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256183": {
+ "content": "tum_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256184": {
+ "content": "tur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256185": {
+ "content": "twi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256186": {
+ "content": "tzm_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256187": {
+ "content": "uig_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256188": {
+ "content": "ukr_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256189": {
+ "content": "umb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256190": {
+ "content": "urd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256191": {
+ "content": "uzn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256192": {
+ "content": "vec_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256193": {
+ "content": "vie_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256194": {
+ "content": "war_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256195": {
+ "content": "wol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256196": {
+ "content": "xho_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256197": {
+ "content": "ydd_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256198": {
+ "content": "yor_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256199": {
+ "content": "yue_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256200": {
+ "content": "zho_Hans",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256201": {
+ "content": "zho_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256202": {
+ "content": "zul_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256203": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "legacy_behaviour": false,
+ "mask_token": "",
+ "model_max_length": 1024,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "src_lang": "eng_Latn",
+ "tgt_lang": null,
+ "tokenizer_class": "NllbTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-1500/trainer_state.json b/checkpoint-1500/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..14639bbf5339a1b183f2d7581f3c7370006275bc
--- /dev/null
+++ b/checkpoint-1500/trainer_state.json
@@ -0,0 +1,10533 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.5004219936857242,
+ "eval_steps": 500,
+ "global_step": 1500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001000281329123816,
+ "grad_norm": 5.902005195617676,
+ "learning_rate": 0.0001999999450590425,
+ "loss": 3.1875,
+ "step": 1
+ },
+ {
+ "epoch": 0.002000562658247632,
+ "grad_norm": 3.2577760219573975,
+ "learning_rate": 0.00019999978023623033,
+ "loss": 2.3666,
+ "step": 2
+ },
+ {
+ "epoch": 0.003000843987371448,
+ "grad_norm": 5.3700995445251465,
+ "learning_rate": 0.0001999995055317446,
+ "loss": 2.8282,
+ "step": 3
+ },
+ {
+ "epoch": 0.004001125316495264,
+ "grad_norm": 2.1445534229278564,
+ "learning_rate": 0.00019999912094588717,
+ "loss": 2.2322,
+ "step": 4
+ },
+ {
+ "epoch": 0.005001406645619081,
+ "grad_norm": 1.5143821239471436,
+ "learning_rate": 0.00019999862647908064,
+ "loss": 2.1709,
+ "step": 5
+ },
+ {
+ "epoch": 0.006001687974742896,
+ "grad_norm": 2.0491714477539062,
+ "learning_rate": 0.00019999802213186834,
+ "loss": 2.2863,
+ "step": 6
+ },
+ {
+ "epoch": 0.007001969303866712,
+ "grad_norm": 1.2016857862472534,
+ "learning_rate": 0.0001999973079049143,
+ "loss": 1.5595,
+ "step": 7
+ },
+ {
+ "epoch": 0.008002250632990529,
+ "grad_norm": 1.3860406875610352,
+ "learning_rate": 0.00019999648379900338,
+ "loss": 1.7264,
+ "step": 8
+ },
+ {
+ "epoch": 0.009002531962114344,
+ "grad_norm": 1.0861930847167969,
+ "learning_rate": 0.0001999955498150411,
+ "loss": 2.0533,
+ "step": 9
+ },
+ {
+ "epoch": 0.010002813291238161,
+ "grad_norm": 2.233243703842163,
+ "learning_rate": 0.00019999450595405374,
+ "loss": 1.9378,
+ "step": 10
+ },
+ {
+ "epoch": 0.011003094620361977,
+ "grad_norm": 1.302808165550232,
+ "learning_rate": 0.0001999933522171883,
+ "loss": 1.9182,
+ "step": 11
+ },
+ {
+ "epoch": 0.012003375949485792,
+ "grad_norm": 0.8285257816314697,
+ "learning_rate": 0.00019999208860571255,
+ "loss": 1.9146,
+ "step": 12
+ },
+ {
+ "epoch": 0.01300365727860961,
+ "grad_norm": 1.2248319387435913,
+ "learning_rate": 0.00019999071512101496,
+ "loss": 1.7467,
+ "step": 13
+ },
+ {
+ "epoch": 0.014003938607733425,
+ "grad_norm": 0.8307135105133057,
+ "learning_rate": 0.00019998923176460474,
+ "loss": 1.6896,
+ "step": 14
+ },
+ {
+ "epoch": 0.01500421993685724,
+ "grad_norm": 1.1531301736831665,
+ "learning_rate": 0.00019998763853811184,
+ "loss": 1.7549,
+ "step": 15
+ },
+ {
+ "epoch": 0.016004501265981057,
+ "grad_norm": 1.0071958303451538,
+ "learning_rate": 0.00019998593544328692,
+ "loss": 1.903,
+ "step": 16
+ },
+ {
+ "epoch": 0.017004782595104875,
+ "grad_norm": 0.9111937284469604,
+ "learning_rate": 0.00019998412248200138,
+ "loss": 1.8372,
+ "step": 17
+ },
+ {
+ "epoch": 0.01800506392422869,
+ "grad_norm": 0.9943836331367493,
+ "learning_rate": 0.00019998219965624734,
+ "loss": 1.7304,
+ "step": 18
+ },
+ {
+ "epoch": 0.019005345253352506,
+ "grad_norm": 0.8139007687568665,
+ "learning_rate": 0.0001999801669681376,
+ "loss": 1.6932,
+ "step": 19
+ },
+ {
+ "epoch": 0.020005626582476323,
+ "grad_norm": 0.7991273999214172,
+ "learning_rate": 0.00019997802441990573,
+ "loss": 1.9596,
+ "step": 20
+ },
+ {
+ "epoch": 0.021005907911600136,
+ "grad_norm": 0.832266628742218,
+ "learning_rate": 0.00019997577201390606,
+ "loss": 1.7116,
+ "step": 21
+ },
+ {
+ "epoch": 0.022006189240723954,
+ "grad_norm": 0.8465655446052551,
+ "learning_rate": 0.00019997340975261353,
+ "loss": 1.7711,
+ "step": 22
+ },
+ {
+ "epoch": 0.02300647056984777,
+ "grad_norm": 1.032426118850708,
+ "learning_rate": 0.00019997093763862383,
+ "loss": 1.6746,
+ "step": 23
+ },
+ {
+ "epoch": 0.024006751898971584,
+ "grad_norm": 1.0036743879318237,
+ "learning_rate": 0.0001999683556746534,
+ "loss": 1.7274,
+ "step": 24
+ },
+ {
+ "epoch": 0.0250070332280954,
+ "grad_norm": 0.9491412043571472,
+ "learning_rate": 0.0001999656638635393,
+ "loss": 2.0302,
+ "step": 25
+ },
+ {
+ "epoch": 0.02600731455721922,
+ "grad_norm": 0.9477822184562683,
+ "learning_rate": 0.0001999628622082394,
+ "loss": 1.6107,
+ "step": 26
+ },
+ {
+ "epoch": 0.027007595886343033,
+ "grad_norm": 1.0687041282653809,
+ "learning_rate": 0.0001999599507118322,
+ "loss": 1.8225,
+ "step": 27
+ },
+ {
+ "epoch": 0.02800787721546685,
+ "grad_norm": 1.6572712659835815,
+ "learning_rate": 0.00019995692937751683,
+ "loss": 1.896,
+ "step": 28
+ },
+ {
+ "epoch": 0.029008158544590667,
+ "grad_norm": 1.013258695602417,
+ "learning_rate": 0.0001999537982086133,
+ "loss": 1.7847,
+ "step": 29
+ },
+ {
+ "epoch": 0.03000843987371448,
+ "grad_norm": 0.7584932446479797,
+ "learning_rate": 0.00019995055720856218,
+ "loss": 1.5841,
+ "step": 30
+ },
+ {
+ "epoch": 0.031008721202838298,
+ "grad_norm": 1.1543537378311157,
+ "learning_rate": 0.00019994720638092468,
+ "loss": 1.8362,
+ "step": 31
+ },
+ {
+ "epoch": 0.032009002531962115,
+ "grad_norm": 0.8389608860015869,
+ "learning_rate": 0.00019994374572938277,
+ "loss": 1.7913,
+ "step": 32
+ },
+ {
+ "epoch": 0.03300928386108593,
+ "grad_norm": 0.7582125663757324,
+ "learning_rate": 0.00019994017525773913,
+ "loss": 1.5406,
+ "step": 33
+ },
+ {
+ "epoch": 0.03400956519020975,
+ "grad_norm": 0.7866935133934021,
+ "learning_rate": 0.00019993649496991705,
+ "loss": 1.5363,
+ "step": 34
+ },
+ {
+ "epoch": 0.03500984651933356,
+ "grad_norm": 0.8007768988609314,
+ "learning_rate": 0.00019993270486996046,
+ "loss": 1.7597,
+ "step": 35
+ },
+ {
+ "epoch": 0.03601012784845738,
+ "grad_norm": 0.8109031319618225,
+ "learning_rate": 0.000199928804962034,
+ "loss": 1.5554,
+ "step": 36
+ },
+ {
+ "epoch": 0.037010409177581194,
+ "grad_norm": 0.7722628116607666,
+ "learning_rate": 0.00019992479525042303,
+ "loss": 1.6437,
+ "step": 37
+ },
+ {
+ "epoch": 0.03801069050670501,
+ "grad_norm": 0.7336480021476746,
+ "learning_rate": 0.00019992067573953342,
+ "loss": 1.7276,
+ "step": 38
+ },
+ {
+ "epoch": 0.03901097183582883,
+ "grad_norm": 0.6940280795097351,
+ "learning_rate": 0.0001999164464338918,
+ "loss": 1.846,
+ "step": 39
+ },
+ {
+ "epoch": 0.040011253164952645,
+ "grad_norm": 0.7079702615737915,
+ "learning_rate": 0.0001999121073381454,
+ "loss": 1.7017,
+ "step": 40
+ },
+ {
+ "epoch": 0.041011534494076456,
+ "grad_norm": 0.7438498139381409,
+ "learning_rate": 0.0001999076584570621,
+ "loss": 1.665,
+ "step": 41
+ },
+ {
+ "epoch": 0.04201181582320027,
+ "grad_norm": 0.6951525211334229,
+ "learning_rate": 0.00019990309979553045,
+ "loss": 1.588,
+ "step": 42
+ },
+ {
+ "epoch": 0.04301209715232409,
+ "grad_norm": 0.9398604035377502,
+ "learning_rate": 0.00019989843135855958,
+ "loss": 1.6513,
+ "step": 43
+ },
+ {
+ "epoch": 0.04401237848144791,
+ "grad_norm": 0.7384347319602966,
+ "learning_rate": 0.00019989365315127922,
+ "loss": 1.5975,
+ "step": 44
+ },
+ {
+ "epoch": 0.045012659810571724,
+ "grad_norm": 0.9856846332550049,
+ "learning_rate": 0.0001998887651789398,
+ "loss": 1.644,
+ "step": 45
+ },
+ {
+ "epoch": 0.04601294113969554,
+ "grad_norm": 0.7322820425033569,
+ "learning_rate": 0.0001998837674469123,
+ "loss": 1.5207,
+ "step": 46
+ },
+ {
+ "epoch": 0.04701322246881936,
+ "grad_norm": 0.8695257902145386,
+ "learning_rate": 0.00019987865996068833,
+ "loss": 1.5572,
+ "step": 47
+ },
+ {
+ "epoch": 0.04801350379794317,
+ "grad_norm": 0.7231017351150513,
+ "learning_rate": 0.00019987344272588006,
+ "loss": 1.5841,
+ "step": 48
+ },
+ {
+ "epoch": 0.049013785127066986,
+ "grad_norm": 0.7147384285926819,
+ "learning_rate": 0.00019986811574822033,
+ "loss": 1.8628,
+ "step": 49
+ },
+ {
+ "epoch": 0.0500140664561908,
+ "grad_norm": 0.8631477355957031,
+ "learning_rate": 0.00019986267903356254,
+ "loss": 1.8487,
+ "step": 50
+ },
+ {
+ "epoch": 0.05101434778531462,
+ "grad_norm": 0.7995486855506897,
+ "learning_rate": 0.0001998571325878806,
+ "loss": 1.6491,
+ "step": 51
+ },
+ {
+ "epoch": 0.05201462911443844,
+ "grad_norm": 0.7828657031059265,
+ "learning_rate": 0.0001998514764172691,
+ "loss": 1.7496,
+ "step": 52
+ },
+ {
+ "epoch": 0.053014910443562255,
+ "grad_norm": 0.7789833545684814,
+ "learning_rate": 0.00019984571052794313,
+ "loss": 1.6628,
+ "step": 53
+ },
+ {
+ "epoch": 0.054015191772686065,
+ "grad_norm": 0.7077661752700806,
+ "learning_rate": 0.00019983983492623833,
+ "loss": 1.771,
+ "step": 54
+ },
+ {
+ "epoch": 0.05501547310180988,
+ "grad_norm": 0.7939582467079163,
+ "learning_rate": 0.00019983384961861096,
+ "loss": 1.707,
+ "step": 55
+ },
+ {
+ "epoch": 0.0560157544309337,
+ "grad_norm": 0.9438828229904175,
+ "learning_rate": 0.0001998277546116378,
+ "loss": 1.8334,
+ "step": 56
+ },
+ {
+ "epoch": 0.05701603576005752,
+ "grad_norm": 0.8028286695480347,
+ "learning_rate": 0.00019982154991201608,
+ "loss": 1.9117,
+ "step": 57
+ },
+ {
+ "epoch": 0.058016317089181334,
+ "grad_norm": 0.6563037037849426,
+ "learning_rate": 0.00019981523552656377,
+ "loss": 1.4767,
+ "step": 58
+ },
+ {
+ "epoch": 0.05901659841830515,
+ "grad_norm": 0.6600964665412903,
+ "learning_rate": 0.00019980881146221914,
+ "loss": 1.6656,
+ "step": 59
+ },
+ {
+ "epoch": 0.06001687974742896,
+ "grad_norm": 0.7966578602790833,
+ "learning_rate": 0.00019980227772604112,
+ "loss": 1.4844,
+ "step": 60
+ },
+ {
+ "epoch": 0.06101716107655278,
+ "grad_norm": 0.8019976615905762,
+ "learning_rate": 0.0001997956343252091,
+ "loss": 1.5682,
+ "step": 61
+ },
+ {
+ "epoch": 0.062017442405676595,
+ "grad_norm": 0.8935349583625793,
+ "learning_rate": 0.00019978888126702296,
+ "loss": 1.8131,
+ "step": 62
+ },
+ {
+ "epoch": 0.06301772373480041,
+ "grad_norm": 0.8085179924964905,
+ "learning_rate": 0.00019978201855890308,
+ "loss": 1.5602,
+ "step": 63
+ },
+ {
+ "epoch": 0.06401800506392423,
+ "grad_norm": 0.7631951570510864,
+ "learning_rate": 0.00019977504620839035,
+ "loss": 1.8008,
+ "step": 64
+ },
+ {
+ "epoch": 0.06501828639304805,
+ "grad_norm": 0.7315165996551514,
+ "learning_rate": 0.00019976796422314615,
+ "loss": 1.5735,
+ "step": 65
+ },
+ {
+ "epoch": 0.06601856772217186,
+ "grad_norm": 0.745726466178894,
+ "learning_rate": 0.00019976077261095226,
+ "loss": 1.5775,
+ "step": 66
+ },
+ {
+ "epoch": 0.06701884905129568,
+ "grad_norm": 0.9082249999046326,
+ "learning_rate": 0.00019975347137971098,
+ "loss": 1.7427,
+ "step": 67
+ },
+ {
+ "epoch": 0.0680191303804195,
+ "grad_norm": 0.6575669050216675,
+ "learning_rate": 0.00019974606053744503,
+ "loss": 1.5231,
+ "step": 68
+ },
+ {
+ "epoch": 0.06901941170954332,
+ "grad_norm": 0.7749233245849609,
+ "learning_rate": 0.00019973854009229763,
+ "loss": 1.5703,
+ "step": 69
+ },
+ {
+ "epoch": 0.07001969303866712,
+ "grad_norm": 0.7240824699401855,
+ "learning_rate": 0.00019973091005253232,
+ "loss": 1.5197,
+ "step": 70
+ },
+ {
+ "epoch": 0.07101997436779094,
+ "grad_norm": 0.8683856725692749,
+ "learning_rate": 0.0001997231704265332,
+ "loss": 1.6183,
+ "step": 71
+ },
+ {
+ "epoch": 0.07202025569691475,
+ "grad_norm": 0.6885640621185303,
+ "learning_rate": 0.00019971532122280464,
+ "loss": 1.6565,
+ "step": 72
+ },
+ {
+ "epoch": 0.07302053702603857,
+ "grad_norm": 0.6648329496383667,
+ "learning_rate": 0.0001997073624499716,
+ "loss": 1.5943,
+ "step": 73
+ },
+ {
+ "epoch": 0.07402081835516239,
+ "grad_norm": 0.8867416977882385,
+ "learning_rate": 0.0001996992941167792,
+ "loss": 1.7855,
+ "step": 74
+ },
+ {
+ "epoch": 0.0750210996842862,
+ "grad_norm": 0.7790491580963135,
+ "learning_rate": 0.00019969111623209323,
+ "loss": 1.6723,
+ "step": 75
+ },
+ {
+ "epoch": 0.07602138101341002,
+ "grad_norm": 0.7999201416969299,
+ "learning_rate": 0.00019968282880489957,
+ "loss": 1.5619,
+ "step": 76
+ },
+ {
+ "epoch": 0.07702166234253384,
+ "grad_norm": 0.6316407322883606,
+ "learning_rate": 0.00019967443184430467,
+ "loss": 1.6377,
+ "step": 77
+ },
+ {
+ "epoch": 0.07802194367165766,
+ "grad_norm": 0.7680445313453674,
+ "learning_rate": 0.0001996659253595353,
+ "loss": 1.5433,
+ "step": 78
+ },
+ {
+ "epoch": 0.07902222500078147,
+ "grad_norm": 0.7158446907997131,
+ "learning_rate": 0.0001996573093599385,
+ "loss": 1.5436,
+ "step": 79
+ },
+ {
+ "epoch": 0.08002250632990529,
+ "grad_norm": 0.7354825139045715,
+ "learning_rate": 0.00019964858385498172,
+ "loss": 1.6512,
+ "step": 80
+ },
+ {
+ "epoch": 0.08102278765902911,
+ "grad_norm": 0.7031210660934448,
+ "learning_rate": 0.00019963974885425266,
+ "loss": 1.6411,
+ "step": 81
+ },
+ {
+ "epoch": 0.08202306898815291,
+ "grad_norm": 0.8451045751571655,
+ "learning_rate": 0.00019963080436745945,
+ "loss": 1.6622,
+ "step": 82
+ },
+ {
+ "epoch": 0.08302335031727673,
+ "grad_norm": 0.8329521417617798,
+ "learning_rate": 0.00019962175040443044,
+ "loss": 1.9269,
+ "step": 83
+ },
+ {
+ "epoch": 0.08402363164640055,
+ "grad_norm": 0.6967645883560181,
+ "learning_rate": 0.0001996125869751143,
+ "loss": 1.7243,
+ "step": 84
+ },
+ {
+ "epoch": 0.08502391297552436,
+ "grad_norm": 0.8699042797088623,
+ "learning_rate": 0.00019960331408957997,
+ "loss": 1.7211,
+ "step": 85
+ },
+ {
+ "epoch": 0.08602419430464818,
+ "grad_norm": 0.6780512928962708,
+ "learning_rate": 0.00019959393175801671,
+ "loss": 1.6376,
+ "step": 86
+ },
+ {
+ "epoch": 0.087024475633772,
+ "grad_norm": 0.7213720679283142,
+ "learning_rate": 0.00019958443999073397,
+ "loss": 1.6048,
+ "step": 87
+ },
+ {
+ "epoch": 0.08802475696289581,
+ "grad_norm": 0.6077585816383362,
+ "learning_rate": 0.00019957483879816151,
+ "loss": 1.5231,
+ "step": 88
+ },
+ {
+ "epoch": 0.08902503829201963,
+ "grad_norm": 0.6854611039161682,
+ "learning_rate": 0.00019956512819084928,
+ "loss": 1.4726,
+ "step": 89
+ },
+ {
+ "epoch": 0.09002531962114345,
+ "grad_norm": 0.6969390511512756,
+ "learning_rate": 0.00019955530817946748,
+ "loss": 1.6435,
+ "step": 90
+ },
+ {
+ "epoch": 0.09102560095026727,
+ "grad_norm": 0.7178792953491211,
+ "learning_rate": 0.00019954537877480655,
+ "loss": 1.6967,
+ "step": 91
+ },
+ {
+ "epoch": 0.09202588227939108,
+ "grad_norm": 0.8248458504676819,
+ "learning_rate": 0.00019953533998777706,
+ "loss": 1.5884,
+ "step": 92
+ },
+ {
+ "epoch": 0.0930261636085149,
+ "grad_norm": 0.6472075581550598,
+ "learning_rate": 0.00019952519182940993,
+ "loss": 1.5074,
+ "step": 93
+ },
+ {
+ "epoch": 0.09402644493763872,
+ "grad_norm": 0.7548672556877136,
+ "learning_rate": 0.00019951493431085603,
+ "loss": 1.6934,
+ "step": 94
+ },
+ {
+ "epoch": 0.09502672626676252,
+ "grad_norm": 0.6680666208267212,
+ "learning_rate": 0.00019950456744338658,
+ "loss": 1.4014,
+ "step": 95
+ },
+ {
+ "epoch": 0.09602700759588634,
+ "grad_norm": 0.7270862460136414,
+ "learning_rate": 0.00019949409123839288,
+ "loss": 1.6991,
+ "step": 96
+ },
+ {
+ "epoch": 0.09702728892501016,
+ "grad_norm": 0.682833731174469,
+ "learning_rate": 0.00019948350570738642,
+ "loss": 1.4926,
+ "step": 97
+ },
+ {
+ "epoch": 0.09802757025413397,
+ "grad_norm": 0.6598315238952637,
+ "learning_rate": 0.0001994728108619987,
+ "loss": 1.6329,
+ "step": 98
+ },
+ {
+ "epoch": 0.09902785158325779,
+ "grad_norm": 0.830845832824707,
+ "learning_rate": 0.0001994620067139815,
+ "loss": 1.8517,
+ "step": 99
+ },
+ {
+ "epoch": 0.1000281329123816,
+ "grad_norm": 0.6964694857597351,
+ "learning_rate": 0.00019945109327520658,
+ "loss": 1.5459,
+ "step": 100
+ },
+ {
+ "epoch": 0.10102841424150542,
+ "grad_norm": 0.8640177249908447,
+ "learning_rate": 0.00019944007055766586,
+ "loss": 1.6638,
+ "step": 101
+ },
+ {
+ "epoch": 0.10202869557062924,
+ "grad_norm": 0.6485210657119751,
+ "learning_rate": 0.00019942893857347128,
+ "loss": 1.8025,
+ "step": 102
+ },
+ {
+ "epoch": 0.10302897689975306,
+ "grad_norm": 0.6746248006820679,
+ "learning_rate": 0.00019941769733485494,
+ "loss": 1.6954,
+ "step": 103
+ },
+ {
+ "epoch": 0.10402925822887688,
+ "grad_norm": 0.7386549115180969,
+ "learning_rate": 0.00019940634685416888,
+ "loss": 1.4547,
+ "step": 104
+ },
+ {
+ "epoch": 0.10502953955800069,
+ "grad_norm": 0.7518633008003235,
+ "learning_rate": 0.00019939488714388524,
+ "loss": 1.5098,
+ "step": 105
+ },
+ {
+ "epoch": 0.10602982088712451,
+ "grad_norm": 0.7350422739982605,
+ "learning_rate": 0.00019938331821659614,
+ "loss": 1.5452,
+ "step": 106
+ },
+ {
+ "epoch": 0.10703010221624833,
+ "grad_norm": 0.6544668674468994,
+ "learning_rate": 0.0001993716400850138,
+ "loss": 1.5106,
+ "step": 107
+ },
+ {
+ "epoch": 0.10803038354537213,
+ "grad_norm": 0.6108564138412476,
+ "learning_rate": 0.0001993598527619703,
+ "loss": 1.5818,
+ "step": 108
+ },
+ {
+ "epoch": 0.10903066487449595,
+ "grad_norm": 0.731071949005127,
+ "learning_rate": 0.00019934795626041783,
+ "loss": 1.4819,
+ "step": 109
+ },
+ {
+ "epoch": 0.11003094620361976,
+ "grad_norm": 0.5978986620903015,
+ "learning_rate": 0.0001993359505934285,
+ "loss": 1.5469,
+ "step": 110
+ },
+ {
+ "epoch": 0.11103122753274358,
+ "grad_norm": 0.7249881029129028,
+ "learning_rate": 0.00019932383577419432,
+ "loss": 1.7466,
+ "step": 111
+ },
+ {
+ "epoch": 0.1120315088618674,
+ "grad_norm": 0.6161806583404541,
+ "learning_rate": 0.0001993116118160273,
+ "loss": 1.3411,
+ "step": 112
+ },
+ {
+ "epoch": 0.11303179019099122,
+ "grad_norm": 0.6745229363441467,
+ "learning_rate": 0.00019929927873235938,
+ "loss": 1.5615,
+ "step": 113
+ },
+ {
+ "epoch": 0.11403207152011503,
+ "grad_norm": 0.6489872336387634,
+ "learning_rate": 0.00019928683653674237,
+ "loss": 1.6279,
+ "step": 114
+ },
+ {
+ "epoch": 0.11503235284923885,
+ "grad_norm": 0.7769975662231445,
+ "learning_rate": 0.00019927428524284805,
+ "loss": 1.6155,
+ "step": 115
+ },
+ {
+ "epoch": 0.11603263417836267,
+ "grad_norm": 0.734336793422699,
+ "learning_rate": 0.00019926162486446792,
+ "loss": 1.6837,
+ "step": 116
+ },
+ {
+ "epoch": 0.11703291550748648,
+ "grad_norm": 0.6966903805732727,
+ "learning_rate": 0.0001992488554155135,
+ "loss": 1.6659,
+ "step": 117
+ },
+ {
+ "epoch": 0.1180331968366103,
+ "grad_norm": 0.6714586615562439,
+ "learning_rate": 0.00019923597691001615,
+ "loss": 1.5161,
+ "step": 118
+ },
+ {
+ "epoch": 0.11903347816573412,
+ "grad_norm": 0.6390894651412964,
+ "learning_rate": 0.0001992229893621269,
+ "loss": 1.4561,
+ "step": 119
+ },
+ {
+ "epoch": 0.12003375949485792,
+ "grad_norm": 0.6481143832206726,
+ "learning_rate": 0.00019920989278611687,
+ "loss": 1.6331,
+ "step": 120
+ },
+ {
+ "epoch": 0.12103404082398174,
+ "grad_norm": 0.6819384694099426,
+ "learning_rate": 0.0001991966871963767,
+ "loss": 1.6508,
+ "step": 121
+ },
+ {
+ "epoch": 0.12203432215310556,
+ "grad_norm": 0.6839059591293335,
+ "learning_rate": 0.000199183372607417,
+ "loss": 1.6514,
+ "step": 122
+ },
+ {
+ "epoch": 0.12303460348222937,
+ "grad_norm": 0.6401050090789795,
+ "learning_rate": 0.0001991699490338681,
+ "loss": 1.8065,
+ "step": 123
+ },
+ {
+ "epoch": 0.12403488481135319,
+ "grad_norm": 0.6860588788986206,
+ "learning_rate": 0.00019915641649048005,
+ "loss": 1.7658,
+ "step": 124
+ },
+ {
+ "epoch": 0.12503516614047702,
+ "grad_norm": 0.6286434531211853,
+ "learning_rate": 0.0001991427749921227,
+ "loss": 1.7678,
+ "step": 125
+ },
+ {
+ "epoch": 0.12603544746960083,
+ "grad_norm": 0.6609922647476196,
+ "learning_rate": 0.00019912902455378556,
+ "loss": 1.4934,
+ "step": 126
+ },
+ {
+ "epoch": 0.12703572879872463,
+ "grad_norm": 0.7058399319648743,
+ "learning_rate": 0.00019911516519057788,
+ "loss": 1.6058,
+ "step": 127
+ },
+ {
+ "epoch": 0.12803601012784846,
+ "grad_norm": 0.6362051963806152,
+ "learning_rate": 0.00019910119691772863,
+ "loss": 1.502,
+ "step": 128
+ },
+ {
+ "epoch": 0.12903629145697226,
+ "grad_norm": 0.7493100762367249,
+ "learning_rate": 0.00019908711975058637,
+ "loss": 1.5287,
+ "step": 129
+ },
+ {
+ "epoch": 0.1300365727860961,
+ "grad_norm": 0.6492393612861633,
+ "learning_rate": 0.0001990729337046194,
+ "loss": 1.5716,
+ "step": 130
+ },
+ {
+ "epoch": 0.1310368541152199,
+ "grad_norm": 0.70331871509552,
+ "learning_rate": 0.0001990586387954156,
+ "loss": 1.5882,
+ "step": 131
+ },
+ {
+ "epoch": 0.13203713544434373,
+ "grad_norm": 0.7581572532653809,
+ "learning_rate": 0.00019904423503868247,
+ "loss": 1.7627,
+ "step": 132
+ },
+ {
+ "epoch": 0.13303741677346753,
+ "grad_norm": 0.7087228894233704,
+ "learning_rate": 0.00019902972245024715,
+ "loss": 1.6257,
+ "step": 133
+ },
+ {
+ "epoch": 0.13403769810259136,
+ "grad_norm": 0.7920627593994141,
+ "learning_rate": 0.00019901510104605637,
+ "loss": 1.572,
+ "step": 134
+ },
+ {
+ "epoch": 0.13503797943171517,
+ "grad_norm": 0.6869202256202698,
+ "learning_rate": 0.00019900037084217637,
+ "loss": 1.5478,
+ "step": 135
+ },
+ {
+ "epoch": 0.136038260760839,
+ "grad_norm": 0.6879409551620483,
+ "learning_rate": 0.00019898553185479303,
+ "loss": 1.3104,
+ "step": 136
+ },
+ {
+ "epoch": 0.1370385420899628,
+ "grad_norm": 0.6574143767356873,
+ "learning_rate": 0.00019897058410021167,
+ "loss": 1.7041,
+ "step": 137
+ },
+ {
+ "epoch": 0.13803882341908663,
+ "grad_norm": 0.7793259620666504,
+ "learning_rate": 0.00019895552759485722,
+ "loss": 1.5417,
+ "step": 138
+ },
+ {
+ "epoch": 0.13903910474821043,
+ "grad_norm": 0.6310438513755798,
+ "learning_rate": 0.00019894036235527395,
+ "loss": 1.4978,
+ "step": 139
+ },
+ {
+ "epoch": 0.14003938607733424,
+ "grad_norm": 0.6298012137413025,
+ "learning_rate": 0.00019892508839812584,
+ "loss": 1.5367,
+ "step": 140
+ },
+ {
+ "epoch": 0.14103966740645807,
+ "grad_norm": 0.5647856593132019,
+ "learning_rate": 0.00019890970574019617,
+ "loss": 1.537,
+ "step": 141
+ },
+ {
+ "epoch": 0.14203994873558187,
+ "grad_norm": 0.6491876244544983,
+ "learning_rate": 0.00019889421439838763,
+ "loss": 1.6992,
+ "step": 142
+ },
+ {
+ "epoch": 0.1430402300647057,
+ "grad_norm": 0.6574720144271851,
+ "learning_rate": 0.00019887861438972246,
+ "loss": 1.3837,
+ "step": 143
+ },
+ {
+ "epoch": 0.1440405113938295,
+ "grad_norm": 0.6267092227935791,
+ "learning_rate": 0.00019886290573134228,
+ "loss": 1.6307,
+ "step": 144
+ },
+ {
+ "epoch": 0.14504079272295334,
+ "grad_norm": 0.6785029172897339,
+ "learning_rate": 0.000198847088440508,
+ "loss": 1.574,
+ "step": 145
+ },
+ {
+ "epoch": 0.14604107405207714,
+ "grad_norm": 0.6218644380569458,
+ "learning_rate": 0.0001988311625346,
+ "loss": 1.4676,
+ "step": 146
+ },
+ {
+ "epoch": 0.14704135538120097,
+ "grad_norm": 0.6047986745834351,
+ "learning_rate": 0.00019881512803111796,
+ "loss": 1.4316,
+ "step": 147
+ },
+ {
+ "epoch": 0.14804163671032478,
+ "grad_norm": 0.7340937256813049,
+ "learning_rate": 0.00019879898494768093,
+ "loss": 1.5185,
+ "step": 148
+ },
+ {
+ "epoch": 0.1490419180394486,
+ "grad_norm": 0.5874620676040649,
+ "learning_rate": 0.00019878273330202717,
+ "loss": 1.5031,
+ "step": 149
+ },
+ {
+ "epoch": 0.1500421993685724,
+ "grad_norm": 0.6943556666374207,
+ "learning_rate": 0.00019876637311201433,
+ "loss": 1.7323,
+ "step": 150
+ },
+ {
+ "epoch": 0.15104248069769624,
+ "grad_norm": 0.6345832347869873,
+ "learning_rate": 0.00019874990439561934,
+ "loss": 1.4691,
+ "step": 151
+ },
+ {
+ "epoch": 0.15204276202682004,
+ "grad_norm": 0.7047753930091858,
+ "learning_rate": 0.0001987333271709383,
+ "loss": 1.5198,
+ "step": 152
+ },
+ {
+ "epoch": 0.15304304335594385,
+ "grad_norm": 0.6043322086334229,
+ "learning_rate": 0.00019871664145618657,
+ "loss": 1.5488,
+ "step": 153
+ },
+ {
+ "epoch": 0.15404332468506768,
+ "grad_norm": 0.5978446006774902,
+ "learning_rate": 0.00019869984726969878,
+ "loss": 1.4278,
+ "step": 154
+ },
+ {
+ "epoch": 0.15504360601419148,
+ "grad_norm": 0.6796436905860901,
+ "learning_rate": 0.00019868294462992866,
+ "loss": 1.5845,
+ "step": 155
+ },
+ {
+ "epoch": 0.1560438873433153,
+ "grad_norm": 0.7113372087478638,
+ "learning_rate": 0.00019866593355544922,
+ "loss": 1.7509,
+ "step": 156
+ },
+ {
+ "epoch": 0.15704416867243912,
+ "grad_norm": 0.5908107757568359,
+ "learning_rate": 0.00019864881406495246,
+ "loss": 1.5693,
+ "step": 157
+ },
+ {
+ "epoch": 0.15804445000156295,
+ "grad_norm": 0.7135252952575684,
+ "learning_rate": 0.00019863158617724967,
+ "loss": 1.6109,
+ "step": 158
+ },
+ {
+ "epoch": 0.15904473133068675,
+ "grad_norm": 0.5621710419654846,
+ "learning_rate": 0.00019861424991127115,
+ "loss": 1.5368,
+ "step": 159
+ },
+ {
+ "epoch": 0.16004501265981058,
+ "grad_norm": 0.6205443143844604,
+ "learning_rate": 0.00019859680528606637,
+ "loss": 1.5181,
+ "step": 160
+ },
+ {
+ "epoch": 0.16104529398893438,
+ "grad_norm": 0.6933260560035706,
+ "learning_rate": 0.00019857925232080373,
+ "loss": 1.4508,
+ "step": 161
+ },
+ {
+ "epoch": 0.16204557531805822,
+ "grad_norm": 0.6911661028862,
+ "learning_rate": 0.00019856159103477086,
+ "loss": 1.5423,
+ "step": 162
+ },
+ {
+ "epoch": 0.16304585664718202,
+ "grad_norm": 0.7684744000434875,
+ "learning_rate": 0.00019854382144737426,
+ "loss": 1.4097,
+ "step": 163
+ },
+ {
+ "epoch": 0.16404613797630582,
+ "grad_norm": 0.6657288074493408,
+ "learning_rate": 0.00019852594357813952,
+ "loss": 1.6145,
+ "step": 164
+ },
+ {
+ "epoch": 0.16504641930542965,
+ "grad_norm": 0.7030160427093506,
+ "learning_rate": 0.00019850795744671116,
+ "loss": 1.6551,
+ "step": 165
+ },
+ {
+ "epoch": 0.16604670063455346,
+ "grad_norm": 0.87894207239151,
+ "learning_rate": 0.0001984898630728527,
+ "loss": 1.6316,
+ "step": 166
+ },
+ {
+ "epoch": 0.1670469819636773,
+ "grad_norm": 0.6282681226730347,
+ "learning_rate": 0.0001984716604764466,
+ "loss": 1.451,
+ "step": 167
+ },
+ {
+ "epoch": 0.1680472632928011,
+ "grad_norm": 0.6729792952537537,
+ "learning_rate": 0.0001984533496774942,
+ "loss": 1.4381,
+ "step": 168
+ },
+ {
+ "epoch": 0.16904754462192492,
+ "grad_norm": 0.7300116419792175,
+ "learning_rate": 0.0001984349306961158,
+ "loss": 1.4244,
+ "step": 169
+ },
+ {
+ "epoch": 0.17004782595104873,
+ "grad_norm": 0.6853480935096741,
+ "learning_rate": 0.00019841640355255043,
+ "loss": 1.6174,
+ "step": 170
+ },
+ {
+ "epoch": 0.17104810728017256,
+ "grad_norm": 0.735612690448761,
+ "learning_rate": 0.00019839776826715614,
+ "loss": 1.5085,
+ "step": 171
+ },
+ {
+ "epoch": 0.17204838860929636,
+ "grad_norm": 0.6735563278198242,
+ "learning_rate": 0.00019837902486040978,
+ "loss": 1.507,
+ "step": 172
+ },
+ {
+ "epoch": 0.1730486699384202,
+ "grad_norm": 0.6617917418479919,
+ "learning_rate": 0.0001983601733529069,
+ "loss": 1.6774,
+ "step": 173
+ },
+ {
+ "epoch": 0.174048951267544,
+ "grad_norm": 0.7137823700904846,
+ "learning_rate": 0.00019834121376536187,
+ "loss": 1.4665,
+ "step": 174
+ },
+ {
+ "epoch": 0.17504923259666783,
+ "grad_norm": 0.6372626423835754,
+ "learning_rate": 0.00019832214611860793,
+ "loss": 1.3597,
+ "step": 175
+ },
+ {
+ "epoch": 0.17604951392579163,
+ "grad_norm": 0.7131632566452026,
+ "learning_rate": 0.00019830297043359692,
+ "loss": 1.4833,
+ "step": 176
+ },
+ {
+ "epoch": 0.17704979525491543,
+ "grad_norm": 0.7538559436798096,
+ "learning_rate": 0.00019828368673139947,
+ "loss": 1.4714,
+ "step": 177
+ },
+ {
+ "epoch": 0.17805007658403926,
+ "grad_norm": 0.5684806108474731,
+ "learning_rate": 0.0001982642950332049,
+ "loss": 1.5012,
+ "step": 178
+ },
+ {
+ "epoch": 0.17905035791316307,
+ "grad_norm": 0.621658444404602,
+ "learning_rate": 0.00019824479536032112,
+ "loss": 1.9119,
+ "step": 179
+ },
+ {
+ "epoch": 0.1800506392422869,
+ "grad_norm": 0.6564679741859436,
+ "learning_rate": 0.0001982251877341748,
+ "loss": 1.5131,
+ "step": 180
+ },
+ {
+ "epoch": 0.1810509205714107,
+ "grad_norm": 0.6546526551246643,
+ "learning_rate": 0.00019820547217631117,
+ "loss": 1.4493,
+ "step": 181
+ },
+ {
+ "epoch": 0.18205120190053453,
+ "grad_norm": 0.6504479050636292,
+ "learning_rate": 0.00019818564870839405,
+ "loss": 1.6131,
+ "step": 182
+ },
+ {
+ "epoch": 0.18305148322965833,
+ "grad_norm": 0.6269803047180176,
+ "learning_rate": 0.00019816571735220583,
+ "loss": 1.5936,
+ "step": 183
+ },
+ {
+ "epoch": 0.18405176455878217,
+ "grad_norm": 0.6303942799568176,
+ "learning_rate": 0.00019814567812964748,
+ "loss": 1.6948,
+ "step": 184
+ },
+ {
+ "epoch": 0.18505204588790597,
+ "grad_norm": 0.6562885046005249,
+ "learning_rate": 0.00019812553106273847,
+ "loss": 1.5542,
+ "step": 185
+ },
+ {
+ "epoch": 0.1860523272170298,
+ "grad_norm": 0.5844212174415588,
+ "learning_rate": 0.00019810527617361681,
+ "loss": 1.539,
+ "step": 186
+ },
+ {
+ "epoch": 0.1870526085461536,
+ "grad_norm": 0.6402295231819153,
+ "learning_rate": 0.00019808491348453894,
+ "loss": 1.4748,
+ "step": 187
+ },
+ {
+ "epoch": 0.18805288987527743,
+ "grad_norm": 0.6579477190971375,
+ "learning_rate": 0.00019806444301787978,
+ "loss": 1.5114,
+ "step": 188
+ },
+ {
+ "epoch": 0.18905317120440124,
+ "grad_norm": 0.6511597037315369,
+ "learning_rate": 0.0001980438647961327,
+ "loss": 1.4678,
+ "step": 189
+ },
+ {
+ "epoch": 0.19005345253352504,
+ "grad_norm": 0.6911427974700928,
+ "learning_rate": 0.00019802317884190935,
+ "loss": 1.6876,
+ "step": 190
+ },
+ {
+ "epoch": 0.19105373386264887,
+ "grad_norm": 0.6146433353424072,
+ "learning_rate": 0.00019800238517793996,
+ "loss": 1.5986,
+ "step": 191
+ },
+ {
+ "epoch": 0.19205401519177268,
+ "grad_norm": 0.6126302480697632,
+ "learning_rate": 0.00019798148382707296,
+ "loss": 1.571,
+ "step": 192
+ },
+ {
+ "epoch": 0.1930542965208965,
+ "grad_norm": 0.5751072764396667,
+ "learning_rate": 0.00019796047481227515,
+ "loss": 1.4921,
+ "step": 193
+ },
+ {
+ "epoch": 0.1940545778500203,
+ "grad_norm": 0.6484839916229248,
+ "learning_rate": 0.00019793935815663163,
+ "loss": 1.7495,
+ "step": 194
+ },
+ {
+ "epoch": 0.19505485917914414,
+ "grad_norm": 0.6875973343849182,
+ "learning_rate": 0.00019791813388334581,
+ "loss": 1.5782,
+ "step": 195
+ },
+ {
+ "epoch": 0.19605514050826794,
+ "grad_norm": 0.8130943179130554,
+ "learning_rate": 0.00019789680201573933,
+ "loss": 1.4964,
+ "step": 196
+ },
+ {
+ "epoch": 0.19705542183739178,
+ "grad_norm": 0.6734403371810913,
+ "learning_rate": 0.00019787536257725202,
+ "loss": 1.4787,
+ "step": 197
+ },
+ {
+ "epoch": 0.19805570316651558,
+ "grad_norm": 0.6480582356452942,
+ "learning_rate": 0.00019785381559144196,
+ "loss": 1.5629,
+ "step": 198
+ },
+ {
+ "epoch": 0.1990559844956394,
+ "grad_norm": 0.6554624438285828,
+ "learning_rate": 0.00019783216108198542,
+ "loss": 1.5806,
+ "step": 199
+ },
+ {
+ "epoch": 0.2000562658247632,
+ "grad_norm": 0.705443263053894,
+ "learning_rate": 0.00019781039907267677,
+ "loss": 1.8372,
+ "step": 200
+ },
+ {
+ "epoch": 0.20105654715388704,
+ "grad_norm": 0.706923246383667,
+ "learning_rate": 0.00019778852958742853,
+ "loss": 1.6405,
+ "step": 201
+ },
+ {
+ "epoch": 0.20205682848301085,
+ "grad_norm": 0.7062544822692871,
+ "learning_rate": 0.00019776655265027127,
+ "loss": 1.6,
+ "step": 202
+ },
+ {
+ "epoch": 0.20305710981213465,
+ "grad_norm": 0.7227569222450256,
+ "learning_rate": 0.00019774446828535371,
+ "loss": 1.5172,
+ "step": 203
+ },
+ {
+ "epoch": 0.20405739114125848,
+ "grad_norm": 0.6762563586235046,
+ "learning_rate": 0.00019772227651694256,
+ "loss": 1.6753,
+ "step": 204
+ },
+ {
+ "epoch": 0.20505767247038229,
+ "grad_norm": 0.6048421859741211,
+ "learning_rate": 0.00019769997736942258,
+ "loss": 1.4827,
+ "step": 205
+ },
+ {
+ "epoch": 0.20605795379950612,
+ "grad_norm": 0.6002956032752991,
+ "learning_rate": 0.00019767757086729647,
+ "loss": 1.5438,
+ "step": 206
+ },
+ {
+ "epoch": 0.20705823512862992,
+ "grad_norm": 0.7948954701423645,
+ "learning_rate": 0.00019765505703518496,
+ "loss": 1.4988,
+ "step": 207
+ },
+ {
+ "epoch": 0.20805851645775375,
+ "grad_norm": 0.6495680809020996,
+ "learning_rate": 0.00019763243589782662,
+ "loss": 1.5738,
+ "step": 208
+ },
+ {
+ "epoch": 0.20905879778687755,
+ "grad_norm": 0.6413107514381409,
+ "learning_rate": 0.00019760970748007803,
+ "loss": 1.3794,
+ "step": 209
+ },
+ {
+ "epoch": 0.21005907911600138,
+ "grad_norm": 0.5999665260314941,
+ "learning_rate": 0.0001975868718069136,
+ "loss": 1.4313,
+ "step": 210
+ },
+ {
+ "epoch": 0.2110593604451252,
+ "grad_norm": 0.6355773210525513,
+ "learning_rate": 0.00019756392890342563,
+ "loss": 1.5107,
+ "step": 211
+ },
+ {
+ "epoch": 0.21205964177424902,
+ "grad_norm": 0.6068251729011536,
+ "learning_rate": 0.00019754087879482422,
+ "loss": 1.536,
+ "step": 212
+ },
+ {
+ "epoch": 0.21305992310337282,
+ "grad_norm": 0.5568909049034119,
+ "learning_rate": 0.00019751772150643722,
+ "loss": 1.5372,
+ "step": 213
+ },
+ {
+ "epoch": 0.21406020443249665,
+ "grad_norm": 0.5771281719207764,
+ "learning_rate": 0.00019749445706371038,
+ "loss": 1.487,
+ "step": 214
+ },
+ {
+ "epoch": 0.21506048576162046,
+ "grad_norm": 0.6146671772003174,
+ "learning_rate": 0.00019747108549220702,
+ "loss": 1.4585,
+ "step": 215
+ },
+ {
+ "epoch": 0.21606076709074426,
+ "grad_norm": 0.5595754981040955,
+ "learning_rate": 0.00019744760681760832,
+ "loss": 1.4224,
+ "step": 216
+ },
+ {
+ "epoch": 0.2170610484198681,
+ "grad_norm": 0.5873929858207703,
+ "learning_rate": 0.00019742402106571314,
+ "loss": 1.4581,
+ "step": 217
+ },
+ {
+ "epoch": 0.2180613297489919,
+ "grad_norm": 0.5725668668746948,
+ "learning_rate": 0.00019740032826243788,
+ "loss": 1.4393,
+ "step": 218
+ },
+ {
+ "epoch": 0.21906161107811573,
+ "grad_norm": 0.6452648043632507,
+ "learning_rate": 0.0001973765284338167,
+ "loss": 1.6048,
+ "step": 219
+ },
+ {
+ "epoch": 0.22006189240723953,
+ "grad_norm": 0.6166092753410339,
+ "learning_rate": 0.00019735262160600127,
+ "loss": 1.4976,
+ "step": 220
+ },
+ {
+ "epoch": 0.22106217373636336,
+ "grad_norm": 0.7053269147872925,
+ "learning_rate": 0.00019732860780526088,
+ "loss": 1.6882,
+ "step": 221
+ },
+ {
+ "epoch": 0.22206245506548716,
+ "grad_norm": 0.7072796821594238,
+ "learning_rate": 0.00019730448705798239,
+ "loss": 1.5441,
+ "step": 222
+ },
+ {
+ "epoch": 0.223062736394611,
+ "grad_norm": 0.6704496145248413,
+ "learning_rate": 0.00019728025939067008,
+ "loss": 1.3791,
+ "step": 223
+ },
+ {
+ "epoch": 0.2240630177237348,
+ "grad_norm": 0.6141743659973145,
+ "learning_rate": 0.00019725592482994583,
+ "loss": 1.5831,
+ "step": 224
+ },
+ {
+ "epoch": 0.22506329905285863,
+ "grad_norm": 0.6235673427581787,
+ "learning_rate": 0.00019723148340254892,
+ "loss": 1.6103,
+ "step": 225
+ },
+ {
+ "epoch": 0.22606358038198243,
+ "grad_norm": 0.6383673548698425,
+ "learning_rate": 0.00019720693513533598,
+ "loss": 1.6284,
+ "step": 226
+ },
+ {
+ "epoch": 0.22706386171110624,
+ "grad_norm": 0.7666104435920715,
+ "learning_rate": 0.00019718228005528122,
+ "loss": 1.702,
+ "step": 227
+ },
+ {
+ "epoch": 0.22806414304023007,
+ "grad_norm": 0.6431383490562439,
+ "learning_rate": 0.00019715751818947603,
+ "loss": 1.4571,
+ "step": 228
+ },
+ {
+ "epoch": 0.22906442436935387,
+ "grad_norm": 0.6177626252174377,
+ "learning_rate": 0.0001971326495651293,
+ "loss": 1.4326,
+ "step": 229
+ },
+ {
+ "epoch": 0.2300647056984777,
+ "grad_norm": 0.7352898120880127,
+ "learning_rate": 0.00019710767420956705,
+ "loss": 1.7427,
+ "step": 230
+ },
+ {
+ "epoch": 0.2310649870276015,
+ "grad_norm": 0.6259469389915466,
+ "learning_rate": 0.0001970825921502328,
+ "loss": 1.634,
+ "step": 231
+ },
+ {
+ "epoch": 0.23206526835672533,
+ "grad_norm": 0.6699635982513428,
+ "learning_rate": 0.0001970574034146871,
+ "loss": 1.4705,
+ "step": 232
+ },
+ {
+ "epoch": 0.23306554968584914,
+ "grad_norm": 0.5577033162117004,
+ "learning_rate": 0.00019703210803060782,
+ "loss": 1.5438,
+ "step": 233
+ },
+ {
+ "epoch": 0.23406583101497297,
+ "grad_norm": 0.6063429117202759,
+ "learning_rate": 0.00019700670602579008,
+ "loss": 1.555,
+ "step": 234
+ },
+ {
+ "epoch": 0.23506611234409677,
+ "grad_norm": 0.6069104671478271,
+ "learning_rate": 0.00019698119742814606,
+ "loss": 1.5036,
+ "step": 235
+ },
+ {
+ "epoch": 0.2360663936732206,
+ "grad_norm": 0.6158379316329956,
+ "learning_rate": 0.00019695558226570507,
+ "loss": 1.3741,
+ "step": 236
+ },
+ {
+ "epoch": 0.2370666750023444,
+ "grad_norm": 0.6366294622421265,
+ "learning_rate": 0.00019692986056661356,
+ "loss": 1.4467,
+ "step": 237
+ },
+ {
+ "epoch": 0.23806695633146824,
+ "grad_norm": 0.6726595163345337,
+ "learning_rate": 0.00019690403235913504,
+ "loss": 1.3861,
+ "step": 238
+ },
+ {
+ "epoch": 0.23906723766059204,
+ "grad_norm": 0.6546512842178345,
+ "learning_rate": 0.00019687809767165,
+ "loss": 1.6886,
+ "step": 239
+ },
+ {
+ "epoch": 0.24006751898971584,
+ "grad_norm": 0.6623121500015259,
+ "learning_rate": 0.000196852056532656,
+ "loss": 1.5925,
+ "step": 240
+ },
+ {
+ "epoch": 0.24106780031883968,
+ "grad_norm": 0.6577529311180115,
+ "learning_rate": 0.00019682590897076752,
+ "loss": 1.4509,
+ "step": 241
+ },
+ {
+ "epoch": 0.24206808164796348,
+ "grad_norm": 0.5586327314376831,
+ "learning_rate": 0.00019679965501471608,
+ "loss": 1.6346,
+ "step": 242
+ },
+ {
+ "epoch": 0.2430683629770873,
+ "grad_norm": 0.6459937691688538,
+ "learning_rate": 0.0001967732946933499,
+ "loss": 1.4129,
+ "step": 243
+ },
+ {
+ "epoch": 0.2440686443062111,
+ "grad_norm": 0.778732180595398,
+ "learning_rate": 0.00019674682803563428,
+ "loss": 1.5129,
+ "step": 244
+ },
+ {
+ "epoch": 0.24506892563533494,
+ "grad_norm": 0.7264451384544373,
+ "learning_rate": 0.00019672025507065131,
+ "loss": 1.4483,
+ "step": 245
+ },
+ {
+ "epoch": 0.24606920696445875,
+ "grad_norm": 0.616084635257721,
+ "learning_rate": 0.00019669357582759983,
+ "loss": 1.5947,
+ "step": 246
+ },
+ {
+ "epoch": 0.24706948829358258,
+ "grad_norm": 0.5911642909049988,
+ "learning_rate": 0.00019666679033579552,
+ "loss": 1.6407,
+ "step": 247
+ },
+ {
+ "epoch": 0.24806976962270638,
+ "grad_norm": 0.6102796792984009,
+ "learning_rate": 0.00019663989862467082,
+ "loss": 1.5251,
+ "step": 248
+ },
+ {
+ "epoch": 0.2490700509518302,
+ "grad_norm": 0.5973434448242188,
+ "learning_rate": 0.00019661290072377482,
+ "loss": 1.3969,
+ "step": 249
+ },
+ {
+ "epoch": 0.25007033228095404,
+ "grad_norm": 0.8515523076057434,
+ "learning_rate": 0.00019658579666277334,
+ "loss": 1.5687,
+ "step": 250
+ },
+ {
+ "epoch": 0.2510706136100778,
+ "grad_norm": 0.5003417134284973,
+ "learning_rate": 0.0001965585864714488,
+ "loss": 1.4102,
+ "step": 251
+ },
+ {
+ "epoch": 0.25207089493920165,
+ "grad_norm": 0.5215190052986145,
+ "learning_rate": 0.00019653127017970034,
+ "loss": 1.2471,
+ "step": 252
+ },
+ {
+ "epoch": 0.2530711762683255,
+ "grad_norm": 0.6491619348526001,
+ "learning_rate": 0.0001965038478175436,
+ "loss": 1.6969,
+ "step": 253
+ },
+ {
+ "epoch": 0.25407145759744926,
+ "grad_norm": 0.6176133155822754,
+ "learning_rate": 0.00019647631941511082,
+ "loss": 1.5351,
+ "step": 254
+ },
+ {
+ "epoch": 0.2550717389265731,
+ "grad_norm": 0.6913408041000366,
+ "learning_rate": 0.0001964486850026507,
+ "loss": 1.4309,
+ "step": 255
+ },
+ {
+ "epoch": 0.2560720202556969,
+ "grad_norm": 0.5875718593597412,
+ "learning_rate": 0.00019642094461052852,
+ "loss": 1.4679,
+ "step": 256
+ },
+ {
+ "epoch": 0.25707230158482075,
+ "grad_norm": 0.6682264804840088,
+ "learning_rate": 0.00019639309826922585,
+ "loss": 1.5393,
+ "step": 257
+ },
+ {
+ "epoch": 0.2580725829139445,
+ "grad_norm": 0.7241432666778564,
+ "learning_rate": 0.0001963651460093409,
+ "loss": 1.4998,
+ "step": 258
+ },
+ {
+ "epoch": 0.25907286424306836,
+ "grad_norm": 0.5210353136062622,
+ "learning_rate": 0.00019633708786158806,
+ "loss": 1.3837,
+ "step": 259
+ },
+ {
+ "epoch": 0.2600731455721922,
+ "grad_norm": 0.584020733833313,
+ "learning_rate": 0.00019630892385679818,
+ "loss": 1.4961,
+ "step": 260
+ },
+ {
+ "epoch": 0.261073426901316,
+ "grad_norm": 0.6708115935325623,
+ "learning_rate": 0.00019628065402591845,
+ "loss": 1.5277,
+ "step": 261
+ },
+ {
+ "epoch": 0.2620737082304398,
+ "grad_norm": 0.5480003952980042,
+ "learning_rate": 0.00019625227840001225,
+ "loss": 1.556,
+ "step": 262
+ },
+ {
+ "epoch": 0.2630739895595636,
+ "grad_norm": 0.595191478729248,
+ "learning_rate": 0.0001962237970102593,
+ "loss": 1.3514,
+ "step": 263
+ },
+ {
+ "epoch": 0.26407427088868746,
+ "grad_norm": 0.7332099080085754,
+ "learning_rate": 0.0001961952098879555,
+ "loss": 1.5394,
+ "step": 264
+ },
+ {
+ "epoch": 0.26507455221781123,
+ "grad_norm": 0.596319317817688,
+ "learning_rate": 0.00019616651706451287,
+ "loss": 1.3828,
+ "step": 265
+ },
+ {
+ "epoch": 0.26607483354693506,
+ "grad_norm": 0.5998026132583618,
+ "learning_rate": 0.0001961377185714597,
+ "loss": 1.4479,
+ "step": 266
+ },
+ {
+ "epoch": 0.2670751148760589,
+ "grad_norm": 0.6220220923423767,
+ "learning_rate": 0.0001961088144404403,
+ "loss": 1.5121,
+ "step": 267
+ },
+ {
+ "epoch": 0.2680753962051827,
+ "grad_norm": 0.5865943431854248,
+ "learning_rate": 0.00019607980470321505,
+ "loss": 1.6747,
+ "step": 268
+ },
+ {
+ "epoch": 0.2690756775343065,
+ "grad_norm": 0.5790852904319763,
+ "learning_rate": 0.00019605068939166045,
+ "loss": 1.3798,
+ "step": 269
+ },
+ {
+ "epoch": 0.27007595886343033,
+ "grad_norm": 0.6157498955726624,
+ "learning_rate": 0.00019602146853776894,
+ "loss": 1.6799,
+ "step": 270
+ },
+ {
+ "epoch": 0.27107624019255416,
+ "grad_norm": 0.6214422583580017,
+ "learning_rate": 0.000195992142173649,
+ "loss": 1.4782,
+ "step": 271
+ },
+ {
+ "epoch": 0.272076521521678,
+ "grad_norm": 0.6460129618644714,
+ "learning_rate": 0.0001959627103315249,
+ "loss": 1.4874,
+ "step": 272
+ },
+ {
+ "epoch": 0.27307680285080177,
+ "grad_norm": 0.5928930640220642,
+ "learning_rate": 0.00019593317304373705,
+ "loss": 1.4557,
+ "step": 273
+ },
+ {
+ "epoch": 0.2740770841799256,
+ "grad_norm": 0.5123687982559204,
+ "learning_rate": 0.00019590353034274144,
+ "loss": 1.445,
+ "step": 274
+ },
+ {
+ "epoch": 0.27507736550904943,
+ "grad_norm": 0.607455313205719,
+ "learning_rate": 0.00019587378226111014,
+ "loss": 1.4468,
+ "step": 275
+ },
+ {
+ "epoch": 0.27607764683817326,
+ "grad_norm": 0.6108120083808899,
+ "learning_rate": 0.00019584392883153088,
+ "loss": 1.3834,
+ "step": 276
+ },
+ {
+ "epoch": 0.27707792816729704,
+ "grad_norm": 0.680404543876648,
+ "learning_rate": 0.00019581397008680717,
+ "loss": 1.5094,
+ "step": 277
+ },
+ {
+ "epoch": 0.27807820949642087,
+ "grad_norm": 0.6419563889503479,
+ "learning_rate": 0.00019578390605985826,
+ "loss": 1.6933,
+ "step": 278
+ },
+ {
+ "epoch": 0.2790784908255447,
+ "grad_norm": 0.5788853764533997,
+ "learning_rate": 0.00019575373678371909,
+ "loss": 1.4754,
+ "step": 279
+ },
+ {
+ "epoch": 0.2800787721546685,
+ "grad_norm": 0.5943770408630371,
+ "learning_rate": 0.00019572346229154025,
+ "loss": 1.2949,
+ "step": 280
+ },
+ {
+ "epoch": 0.2810790534837923,
+ "grad_norm": 0.5997135043144226,
+ "learning_rate": 0.00019569308261658787,
+ "loss": 1.5365,
+ "step": 281
+ },
+ {
+ "epoch": 0.28207933481291614,
+ "grad_norm": 0.692401647567749,
+ "learning_rate": 0.00019566259779224378,
+ "loss": 1.4946,
+ "step": 282
+ },
+ {
+ "epoch": 0.28307961614203997,
+ "grad_norm": 0.5856708884239197,
+ "learning_rate": 0.00019563200785200526,
+ "loss": 1.426,
+ "step": 283
+ },
+ {
+ "epoch": 0.28407989747116374,
+ "grad_norm": 1.2516822814941406,
+ "learning_rate": 0.00019560131282948516,
+ "loss": 1.5119,
+ "step": 284
+ },
+ {
+ "epoch": 0.2850801788002876,
+ "grad_norm": 0.6360501050949097,
+ "learning_rate": 0.0001955705127584117,
+ "loss": 1.3916,
+ "step": 285
+ },
+ {
+ "epoch": 0.2860804601294114,
+ "grad_norm": 0.6822036504745483,
+ "learning_rate": 0.00019553960767262863,
+ "loss": 1.5565,
+ "step": 286
+ },
+ {
+ "epoch": 0.28708074145853524,
+ "grad_norm": 0.6973714828491211,
+ "learning_rate": 0.00019550859760609503,
+ "loss": 1.5559,
+ "step": 287
+ },
+ {
+ "epoch": 0.288081022787659,
+ "grad_norm": 0.6595618724822998,
+ "learning_rate": 0.00019547748259288536,
+ "loss": 1.5824,
+ "step": 288
+ },
+ {
+ "epoch": 0.28908130411678284,
+ "grad_norm": 0.5625808238983154,
+ "learning_rate": 0.0001954462626671894,
+ "loss": 1.2669,
+ "step": 289
+ },
+ {
+ "epoch": 0.2900815854459067,
+ "grad_norm": 0.6318663358688354,
+ "learning_rate": 0.0001954149378633122,
+ "loss": 1.3896,
+ "step": 290
+ },
+ {
+ "epoch": 0.29108186677503045,
+ "grad_norm": 0.6655906438827515,
+ "learning_rate": 0.00019538350821567404,
+ "loss": 1.3889,
+ "step": 291
+ },
+ {
+ "epoch": 0.2920821481041543,
+ "grad_norm": 0.5947337746620178,
+ "learning_rate": 0.00019535197375881045,
+ "loss": 1.6112,
+ "step": 292
+ },
+ {
+ "epoch": 0.2930824294332781,
+ "grad_norm": 0.6139295101165771,
+ "learning_rate": 0.00019532033452737205,
+ "loss": 1.5185,
+ "step": 293
+ },
+ {
+ "epoch": 0.29408271076240194,
+ "grad_norm": 0.579953670501709,
+ "learning_rate": 0.00019528859055612468,
+ "loss": 1.3874,
+ "step": 294
+ },
+ {
+ "epoch": 0.2950829920915257,
+ "grad_norm": 0.6101506352424622,
+ "learning_rate": 0.0001952567418799492,
+ "loss": 1.5965,
+ "step": 295
+ },
+ {
+ "epoch": 0.29608327342064955,
+ "grad_norm": 0.6393965482711792,
+ "learning_rate": 0.00019522478853384155,
+ "loss": 1.4124,
+ "step": 296
+ },
+ {
+ "epoch": 0.2970835547497734,
+ "grad_norm": 0.6147856712341309,
+ "learning_rate": 0.00019519273055291266,
+ "loss": 1.3776,
+ "step": 297
+ },
+ {
+ "epoch": 0.2980838360788972,
+ "grad_norm": 0.6056416630744934,
+ "learning_rate": 0.00019516056797238846,
+ "loss": 1.4453,
+ "step": 298
+ },
+ {
+ "epoch": 0.299084117408021,
+ "grad_norm": 0.6705831289291382,
+ "learning_rate": 0.00019512830082760987,
+ "loss": 1.3248,
+ "step": 299
+ },
+ {
+ "epoch": 0.3000843987371448,
+ "grad_norm": 0.6664314866065979,
+ "learning_rate": 0.00019509592915403255,
+ "loss": 1.5865,
+ "step": 300
+ },
+ {
+ "epoch": 0.30108468006626865,
+ "grad_norm": 0.5325604677200317,
+ "learning_rate": 0.00019506345298722717,
+ "loss": 1.0646,
+ "step": 301
+ },
+ {
+ "epoch": 0.3020849613953925,
+ "grad_norm": 0.589242160320282,
+ "learning_rate": 0.00019503087236287913,
+ "loss": 1.2297,
+ "step": 302
+ },
+ {
+ "epoch": 0.30308524272451626,
+ "grad_norm": 0.5677699446678162,
+ "learning_rate": 0.00019499818731678873,
+ "loss": 1.3961,
+ "step": 303
+ },
+ {
+ "epoch": 0.3040855240536401,
+ "grad_norm": 0.5676394701004028,
+ "learning_rate": 0.00019496539788487082,
+ "loss": 1.3276,
+ "step": 304
+ },
+ {
+ "epoch": 0.3050858053827639,
+ "grad_norm": 0.7280861139297485,
+ "learning_rate": 0.0001949325041031551,
+ "loss": 1.6731,
+ "step": 305
+ },
+ {
+ "epoch": 0.3060860867118877,
+ "grad_norm": 0.690636396408081,
+ "learning_rate": 0.0001948995060077859,
+ "loss": 1.5443,
+ "step": 306
+ },
+ {
+ "epoch": 0.3070863680410115,
+ "grad_norm": 0.611426055431366,
+ "learning_rate": 0.0001948664036350221,
+ "loss": 1.5827,
+ "step": 307
+ },
+ {
+ "epoch": 0.30808664937013536,
+ "grad_norm": 0.7112497091293335,
+ "learning_rate": 0.00019483319702123732,
+ "loss": 1.5401,
+ "step": 308
+ },
+ {
+ "epoch": 0.3090869306992592,
+ "grad_norm": 0.6598275303840637,
+ "learning_rate": 0.00019479988620291956,
+ "loss": 1.6432,
+ "step": 309
+ },
+ {
+ "epoch": 0.31008721202838296,
+ "grad_norm": 0.5019932985305786,
+ "learning_rate": 0.00019476647121667137,
+ "loss": 1.2561,
+ "step": 310
+ },
+ {
+ "epoch": 0.3110874933575068,
+ "grad_norm": 0.7777897715568542,
+ "learning_rate": 0.00019473295209920983,
+ "loss": 1.6118,
+ "step": 311
+ },
+ {
+ "epoch": 0.3120877746866306,
+ "grad_norm": 0.6028640866279602,
+ "learning_rate": 0.00019469932888736632,
+ "loss": 1.4682,
+ "step": 312
+ },
+ {
+ "epoch": 0.31308805601575446,
+ "grad_norm": 0.554381251335144,
+ "learning_rate": 0.00019466560161808674,
+ "loss": 1.4179,
+ "step": 313
+ },
+ {
+ "epoch": 0.31408833734487823,
+ "grad_norm": 0.6212736368179321,
+ "learning_rate": 0.00019463177032843124,
+ "loss": 1.4327,
+ "step": 314
+ },
+ {
+ "epoch": 0.31508861867400206,
+ "grad_norm": 0.6829814910888672,
+ "learning_rate": 0.00019459783505557424,
+ "loss": 1.4455,
+ "step": 315
+ },
+ {
+ "epoch": 0.3160889000031259,
+ "grad_norm": 0.5808065533638,
+ "learning_rate": 0.00019456379583680452,
+ "loss": 1.3583,
+ "step": 316
+ },
+ {
+ "epoch": 0.31708918133224967,
+ "grad_norm": 0.6354159712791443,
+ "learning_rate": 0.000194529652709525,
+ "loss": 1.6916,
+ "step": 317
+ },
+ {
+ "epoch": 0.3180894626613735,
+ "grad_norm": 0.6299159526824951,
+ "learning_rate": 0.00019449540571125286,
+ "loss": 1.47,
+ "step": 318
+ },
+ {
+ "epoch": 0.31908974399049733,
+ "grad_norm": 0.6222877502441406,
+ "learning_rate": 0.00019446105487961926,
+ "loss": 1.4137,
+ "step": 319
+ },
+ {
+ "epoch": 0.32009002531962116,
+ "grad_norm": 0.5995916724205017,
+ "learning_rate": 0.0001944266002523696,
+ "loss": 1.3679,
+ "step": 320
+ },
+ {
+ "epoch": 0.32109030664874494,
+ "grad_norm": 0.599814236164093,
+ "learning_rate": 0.0001943920418673633,
+ "loss": 1.4075,
+ "step": 321
+ },
+ {
+ "epoch": 0.32209058797786877,
+ "grad_norm": 0.5409269332885742,
+ "learning_rate": 0.00019435737976257377,
+ "loss": 1.4289,
+ "step": 322
+ },
+ {
+ "epoch": 0.3230908693069926,
+ "grad_norm": 0.5298951864242554,
+ "learning_rate": 0.00019432261397608834,
+ "loss": 1.2834,
+ "step": 323
+ },
+ {
+ "epoch": 0.32409115063611643,
+ "grad_norm": 0.7196112871170044,
+ "learning_rate": 0.00019428774454610843,
+ "loss": 1.4845,
+ "step": 324
+ },
+ {
+ "epoch": 0.3250914319652402,
+ "grad_norm": 0.5605450868606567,
+ "learning_rate": 0.00019425277151094913,
+ "loss": 1.4575,
+ "step": 325
+ },
+ {
+ "epoch": 0.32609171329436404,
+ "grad_norm": 0.573080837726593,
+ "learning_rate": 0.00019421769490903957,
+ "loss": 1.5757,
+ "step": 326
+ },
+ {
+ "epoch": 0.32709199462348787,
+ "grad_norm": 0.5017902851104736,
+ "learning_rate": 0.0001941825147789225,
+ "loss": 1.5794,
+ "step": 327
+ },
+ {
+ "epoch": 0.32809227595261165,
+ "grad_norm": 0.643267810344696,
+ "learning_rate": 0.00019414723115925456,
+ "loss": 1.4903,
+ "step": 328
+ },
+ {
+ "epoch": 0.3290925572817355,
+ "grad_norm": 0.6522070169448853,
+ "learning_rate": 0.0001941118440888061,
+ "loss": 1.5907,
+ "step": 329
+ },
+ {
+ "epoch": 0.3300928386108593,
+ "grad_norm": 0.6496105790138245,
+ "learning_rate": 0.0001940763536064611,
+ "loss": 1.4225,
+ "step": 330
+ },
+ {
+ "epoch": 0.33109311993998314,
+ "grad_norm": 0.6011468768119812,
+ "learning_rate": 0.00019404075975121716,
+ "loss": 1.5022,
+ "step": 331
+ },
+ {
+ "epoch": 0.3320934012691069,
+ "grad_norm": 0.6327878832817078,
+ "learning_rate": 0.0001940050625621855,
+ "loss": 1.468,
+ "step": 332
+ },
+ {
+ "epoch": 0.33309368259823074,
+ "grad_norm": 0.6187490820884705,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 1.5183,
+ "step": 333
+ },
+ {
+ "epoch": 0.3340939639273546,
+ "grad_norm": 0.7625093460083008,
+ "learning_rate": 0.0001939333583397715,
+ "loss": 1.4813,
+ "step": 334
+ },
+ {
+ "epoch": 0.3350942452564784,
+ "grad_norm": 0.5286359190940857,
+ "learning_rate": 0.00019389735138517915,
+ "loss": 1.3674,
+ "step": 335
+ },
+ {
+ "epoch": 0.3360945265856022,
+ "grad_norm": 0.5798503160476685,
+ "learning_rate": 0.00019386124125437895,
+ "loss": 1.3016,
+ "step": 336
+ },
+ {
+ "epoch": 0.337094807914726,
+ "grad_norm": 0.48794126510620117,
+ "learning_rate": 0.00019382502798704935,
+ "loss": 1.3642,
+ "step": 337
+ },
+ {
+ "epoch": 0.33809508924384984,
+ "grad_norm": 0.7394312620162964,
+ "learning_rate": 0.00019378871162298227,
+ "loss": 1.327,
+ "step": 338
+ },
+ {
+ "epoch": 0.3390953705729737,
+ "grad_norm": 0.5598319172859192,
+ "learning_rate": 0.00019375229220208276,
+ "loss": 1.4247,
+ "step": 339
+ },
+ {
+ "epoch": 0.34009565190209745,
+ "grad_norm": 0.6099628806114197,
+ "learning_rate": 0.00019371576976436917,
+ "loss": 1.4906,
+ "step": 340
+ },
+ {
+ "epoch": 0.3410959332312213,
+ "grad_norm": 0.6749781370162964,
+ "learning_rate": 0.00019367914434997312,
+ "loss": 1.367,
+ "step": 341
+ },
+ {
+ "epoch": 0.3420962145603451,
+ "grad_norm": 0.7721238136291504,
+ "learning_rate": 0.00019364241599913924,
+ "loss": 1.4464,
+ "step": 342
+ },
+ {
+ "epoch": 0.3430964958894689,
+ "grad_norm": 0.5762369632720947,
+ "learning_rate": 0.0001936055847522254,
+ "loss": 1.409,
+ "step": 343
+ },
+ {
+ "epoch": 0.3440967772185927,
+ "grad_norm": 0.6960498690605164,
+ "learning_rate": 0.00019356865064970244,
+ "loss": 1.3907,
+ "step": 344
+ },
+ {
+ "epoch": 0.34509705854771655,
+ "grad_norm": 0.5805984735488892,
+ "learning_rate": 0.0001935316137321543,
+ "loss": 1.4539,
+ "step": 345
+ },
+ {
+ "epoch": 0.3460973398768404,
+ "grad_norm": 0.5686045289039612,
+ "learning_rate": 0.00019349447404027782,
+ "loss": 1.4493,
+ "step": 346
+ },
+ {
+ "epoch": 0.34709762120596416,
+ "grad_norm": 0.5448501706123352,
+ "learning_rate": 0.00019345723161488283,
+ "loss": 1.5633,
+ "step": 347
+ },
+ {
+ "epoch": 0.348097902535088,
+ "grad_norm": 0.6388784050941467,
+ "learning_rate": 0.000193419886496892,
+ "loss": 1.7179,
+ "step": 348
+ },
+ {
+ "epoch": 0.3490981838642118,
+ "grad_norm": 0.5240457653999329,
+ "learning_rate": 0.00019338243872734086,
+ "loss": 1.4411,
+ "step": 349
+ },
+ {
+ "epoch": 0.35009846519333565,
+ "grad_norm": 0.5460641384124756,
+ "learning_rate": 0.00019334488834737775,
+ "loss": 1.361,
+ "step": 350
+ },
+ {
+ "epoch": 0.3510987465224594,
+ "grad_norm": 0.5495695471763611,
+ "learning_rate": 0.00019330723539826375,
+ "loss": 1.5891,
+ "step": 351
+ },
+ {
+ "epoch": 0.35209902785158326,
+ "grad_norm": 0.5618153214454651,
+ "learning_rate": 0.00019326947992137262,
+ "loss": 1.3084,
+ "step": 352
+ },
+ {
+ "epoch": 0.3530993091807071,
+ "grad_norm": 0.5603707432746887,
+ "learning_rate": 0.00019323162195819082,
+ "loss": 1.5732,
+ "step": 353
+ },
+ {
+ "epoch": 0.35409959050983086,
+ "grad_norm": 0.5732563138008118,
+ "learning_rate": 0.0001931936615503174,
+ "loss": 1.5045,
+ "step": 354
+ },
+ {
+ "epoch": 0.3550998718389547,
+ "grad_norm": 0.5997583866119385,
+ "learning_rate": 0.000193155598739464,
+ "loss": 1.4175,
+ "step": 355
+ },
+ {
+ "epoch": 0.3561001531680785,
+ "grad_norm": 0.5769765377044678,
+ "learning_rate": 0.0001931174335674547,
+ "loss": 1.4834,
+ "step": 356
+ },
+ {
+ "epoch": 0.35710043449720236,
+ "grad_norm": 0.5902683138847351,
+ "learning_rate": 0.0001930791660762262,
+ "loss": 1.4664,
+ "step": 357
+ },
+ {
+ "epoch": 0.35810071582632613,
+ "grad_norm": 0.6354758143424988,
+ "learning_rate": 0.00019304079630782752,
+ "loss": 1.3891,
+ "step": 358
+ },
+ {
+ "epoch": 0.35910099715544996,
+ "grad_norm": 0.6018317341804504,
+ "learning_rate": 0.0001930023243044201,
+ "loss": 1.4514,
+ "step": 359
+ },
+ {
+ "epoch": 0.3601012784845738,
+ "grad_norm": 0.5409123301506042,
+ "learning_rate": 0.00019296375010827773,
+ "loss": 1.4708,
+ "step": 360
+ },
+ {
+ "epoch": 0.3611015598136976,
+ "grad_norm": 0.5457523465156555,
+ "learning_rate": 0.00019292507376178643,
+ "loss": 1.4988,
+ "step": 361
+ },
+ {
+ "epoch": 0.3621018411428214,
+ "grad_norm": 0.626768946647644,
+ "learning_rate": 0.00019288629530744454,
+ "loss": 1.5722,
+ "step": 362
+ },
+ {
+ "epoch": 0.36310212247194523,
+ "grad_norm": 0.566554069519043,
+ "learning_rate": 0.0001928474147878626,
+ "loss": 1.2135,
+ "step": 363
+ },
+ {
+ "epoch": 0.36410240380106906,
+ "grad_norm": 0.7327786684036255,
+ "learning_rate": 0.0001928084322457632,
+ "loss": 1.5245,
+ "step": 364
+ },
+ {
+ "epoch": 0.3651026851301929,
+ "grad_norm": 0.5205698609352112,
+ "learning_rate": 0.00019276934772398114,
+ "loss": 1.2068,
+ "step": 365
+ },
+ {
+ "epoch": 0.36610296645931667,
+ "grad_norm": 1.0956753492355347,
+ "learning_rate": 0.00019273016126546323,
+ "loss": 1.5044,
+ "step": 366
+ },
+ {
+ "epoch": 0.3671032477884405,
+ "grad_norm": 0.6484043598175049,
+ "learning_rate": 0.00019269087291326833,
+ "loss": 1.6369,
+ "step": 367
+ },
+ {
+ "epoch": 0.36810352911756433,
+ "grad_norm": 0.6363429427146912,
+ "learning_rate": 0.00019265148271056722,
+ "loss": 1.4338,
+ "step": 368
+ },
+ {
+ "epoch": 0.3691038104466881,
+ "grad_norm": 0.6295244693756104,
+ "learning_rate": 0.0001926119907006426,
+ "loss": 1.4701,
+ "step": 369
+ },
+ {
+ "epoch": 0.37010409177581194,
+ "grad_norm": 0.6013259887695312,
+ "learning_rate": 0.00019257239692688907,
+ "loss": 1.7629,
+ "step": 370
+ },
+ {
+ "epoch": 0.37110437310493577,
+ "grad_norm": 0.6949493885040283,
+ "learning_rate": 0.00019253270143281296,
+ "loss": 1.6713,
+ "step": 371
+ },
+ {
+ "epoch": 0.3721046544340596,
+ "grad_norm": 0.6933801174163818,
+ "learning_rate": 0.00019249290426203252,
+ "loss": 1.6131,
+ "step": 372
+ },
+ {
+ "epoch": 0.3731049357631834,
+ "grad_norm": 0.5847527384757996,
+ "learning_rate": 0.0001924530054582776,
+ "loss": 1.3968,
+ "step": 373
+ },
+ {
+ "epoch": 0.3741052170923072,
+ "grad_norm": 0.6053057312965393,
+ "learning_rate": 0.0001924130050653898,
+ "loss": 1.3311,
+ "step": 374
+ },
+ {
+ "epoch": 0.37510549842143104,
+ "grad_norm": 0.5513793230056763,
+ "learning_rate": 0.00019237290312732226,
+ "loss": 1.5063,
+ "step": 375
+ },
+ {
+ "epoch": 0.37610577975055487,
+ "grad_norm": 0.5859197378158569,
+ "learning_rate": 0.00019233269968813984,
+ "loss": 1.3556,
+ "step": 376
+ },
+ {
+ "epoch": 0.37710606107967864,
+ "grad_norm": 0.5623495578765869,
+ "learning_rate": 0.00019229239479201876,
+ "loss": 1.3859,
+ "step": 377
+ },
+ {
+ "epoch": 0.3781063424088025,
+ "grad_norm": 0.602118968963623,
+ "learning_rate": 0.0001922519884832469,
+ "loss": 1.334,
+ "step": 378
+ },
+ {
+ "epoch": 0.3791066237379263,
+ "grad_norm": 0.5212380886077881,
+ "learning_rate": 0.0001922114808062234,
+ "loss": 1.401,
+ "step": 379
+ },
+ {
+ "epoch": 0.3801069050670501,
+ "grad_norm": 0.4969455599784851,
+ "learning_rate": 0.00019217087180545893,
+ "loss": 1.2292,
+ "step": 380
+ },
+ {
+ "epoch": 0.3811071863961739,
+ "grad_norm": 0.578629732131958,
+ "learning_rate": 0.0001921301615255754,
+ "loss": 1.5015,
+ "step": 381
+ },
+ {
+ "epoch": 0.38210746772529774,
+ "grad_norm": 0.593053936958313,
+ "learning_rate": 0.0001920893500113061,
+ "loss": 1.302,
+ "step": 382
+ },
+ {
+ "epoch": 0.3831077490544216,
+ "grad_norm": 0.5832563638687134,
+ "learning_rate": 0.00019204843730749547,
+ "loss": 1.3695,
+ "step": 383
+ },
+ {
+ "epoch": 0.38410803038354535,
+ "grad_norm": 0.5608510375022888,
+ "learning_rate": 0.00019200742345909915,
+ "loss": 1.3792,
+ "step": 384
+ },
+ {
+ "epoch": 0.3851083117126692,
+ "grad_norm": 0.5337334275245667,
+ "learning_rate": 0.00019196630851118398,
+ "loss": 1.4163,
+ "step": 385
+ },
+ {
+ "epoch": 0.386108593041793,
+ "grad_norm": 0.5460125803947449,
+ "learning_rate": 0.0001919250925089278,
+ "loss": 1.2439,
+ "step": 386
+ },
+ {
+ "epoch": 0.38710887437091684,
+ "grad_norm": 0.6217851638793945,
+ "learning_rate": 0.00019188377549761963,
+ "loss": 1.6428,
+ "step": 387
+ },
+ {
+ "epoch": 0.3881091557000406,
+ "grad_norm": 0.7154502868652344,
+ "learning_rate": 0.00019184235752265928,
+ "loss": 1.3468,
+ "step": 388
+ },
+ {
+ "epoch": 0.38910943702916445,
+ "grad_norm": 0.5044635534286499,
+ "learning_rate": 0.00019180083862955772,
+ "loss": 1.1877,
+ "step": 389
+ },
+ {
+ "epoch": 0.3901097183582883,
+ "grad_norm": 0.5755971074104309,
+ "learning_rate": 0.00019175921886393666,
+ "loss": 1.3475,
+ "step": 390
+ },
+ {
+ "epoch": 0.39110999968741206,
+ "grad_norm": 0.6121137738227844,
+ "learning_rate": 0.00019171749827152869,
+ "loss": 1.4342,
+ "step": 391
+ },
+ {
+ "epoch": 0.3921102810165359,
+ "grad_norm": 0.5615536570549011,
+ "learning_rate": 0.0001916756768981772,
+ "loss": 1.5471,
+ "step": 392
+ },
+ {
+ "epoch": 0.3931105623456597,
+ "grad_norm": 0.6527026295661926,
+ "learning_rate": 0.00019163375478983632,
+ "loss": 1.6363,
+ "step": 393
+ },
+ {
+ "epoch": 0.39411084367478355,
+ "grad_norm": 0.6465044617652893,
+ "learning_rate": 0.00019159173199257085,
+ "loss": 1.3823,
+ "step": 394
+ },
+ {
+ "epoch": 0.3951111250039073,
+ "grad_norm": 0.5620000958442688,
+ "learning_rate": 0.00019154960855255628,
+ "loss": 1.5418,
+ "step": 395
+ },
+ {
+ "epoch": 0.39611140633303116,
+ "grad_norm": 0.7090588808059692,
+ "learning_rate": 0.0001915073845160786,
+ "loss": 1.4593,
+ "step": 396
+ },
+ {
+ "epoch": 0.397111687662155,
+ "grad_norm": 0.6644489169120789,
+ "learning_rate": 0.00019146505992953446,
+ "loss": 1.4236,
+ "step": 397
+ },
+ {
+ "epoch": 0.3981119689912788,
+ "grad_norm": 0.6038135886192322,
+ "learning_rate": 0.00019142263483943085,
+ "loss": 1.1805,
+ "step": 398
+ },
+ {
+ "epoch": 0.3991122503204026,
+ "grad_norm": 0.6746726036071777,
+ "learning_rate": 0.00019138010929238534,
+ "loss": 1.5264,
+ "step": 399
+ },
+ {
+ "epoch": 0.4001125316495264,
+ "grad_norm": 0.5871374607086182,
+ "learning_rate": 0.00019133748333512575,
+ "loss": 1.3709,
+ "step": 400
+ },
+ {
+ "epoch": 0.40111281297865026,
+ "grad_norm": 0.5743412375450134,
+ "learning_rate": 0.00019129475701449035,
+ "loss": 1.4677,
+ "step": 401
+ },
+ {
+ "epoch": 0.4021130943077741,
+ "grad_norm": 0.6184396743774414,
+ "learning_rate": 0.0001912519303774276,
+ "loss": 1.4228,
+ "step": 402
+ },
+ {
+ "epoch": 0.40311337563689786,
+ "grad_norm": 0.5872434973716736,
+ "learning_rate": 0.0001912090034709963,
+ "loss": 1.3495,
+ "step": 403
+ },
+ {
+ "epoch": 0.4041136569660217,
+ "grad_norm": 0.6500155925750732,
+ "learning_rate": 0.00019116597634236525,
+ "loss": 1.4315,
+ "step": 404
+ },
+ {
+ "epoch": 0.4051139382951455,
+ "grad_norm": 0.5240740180015564,
+ "learning_rate": 0.0001911228490388136,
+ "loss": 1.4954,
+ "step": 405
+ },
+ {
+ "epoch": 0.4061142196242693,
+ "grad_norm": 0.5531806945800781,
+ "learning_rate": 0.00019107962160773035,
+ "loss": 1.3949,
+ "step": 406
+ },
+ {
+ "epoch": 0.40711450095339313,
+ "grad_norm": 0.5266262888908386,
+ "learning_rate": 0.0001910362940966147,
+ "loss": 1.2859,
+ "step": 407
+ },
+ {
+ "epoch": 0.40811478228251696,
+ "grad_norm": 0.5734869241714478,
+ "learning_rate": 0.00019099286655307568,
+ "loss": 1.2451,
+ "step": 408
+ },
+ {
+ "epoch": 0.4091150636116408,
+ "grad_norm": 0.5922874212265015,
+ "learning_rate": 0.0001909493390248324,
+ "loss": 1.5429,
+ "step": 409
+ },
+ {
+ "epoch": 0.41011534494076457,
+ "grad_norm": 0.542540431022644,
+ "learning_rate": 0.00019090571155971366,
+ "loss": 1.4138,
+ "step": 410
+ },
+ {
+ "epoch": 0.4111156262698884,
+ "grad_norm": 0.57356196641922,
+ "learning_rate": 0.00019086198420565823,
+ "loss": 1.2592,
+ "step": 411
+ },
+ {
+ "epoch": 0.41211590759901223,
+ "grad_norm": 0.6042733192443848,
+ "learning_rate": 0.00019081815701071445,
+ "loss": 1.5524,
+ "step": 412
+ },
+ {
+ "epoch": 0.41311618892813606,
+ "grad_norm": 0.46550241112709045,
+ "learning_rate": 0.0001907742300230406,
+ "loss": 1.308,
+ "step": 413
+ },
+ {
+ "epoch": 0.41411647025725984,
+ "grad_norm": 0.6283137798309326,
+ "learning_rate": 0.00019073020329090444,
+ "loss": 1.4753,
+ "step": 414
+ },
+ {
+ "epoch": 0.41511675158638367,
+ "grad_norm": 0.5254876613616943,
+ "learning_rate": 0.0001906860768626834,
+ "loss": 1.2157,
+ "step": 415
+ },
+ {
+ "epoch": 0.4161170329155075,
+ "grad_norm": 0.59089195728302,
+ "learning_rate": 0.00019064185078686443,
+ "loss": 1.2684,
+ "step": 416
+ },
+ {
+ "epoch": 0.4171173142446313,
+ "grad_norm": 0.7129126787185669,
+ "learning_rate": 0.000190597525112044,
+ "loss": 1.3974,
+ "step": 417
+ },
+ {
+ "epoch": 0.4181175955737551,
+ "grad_norm": 0.607305109500885,
+ "learning_rate": 0.000190553099886928,
+ "loss": 1.4312,
+ "step": 418
+ },
+ {
+ "epoch": 0.41911787690287894,
+ "grad_norm": 0.49921515583992004,
+ "learning_rate": 0.00019050857516033173,
+ "loss": 1.3469,
+ "step": 419
+ },
+ {
+ "epoch": 0.42011815823200277,
+ "grad_norm": 0.6167325377464294,
+ "learning_rate": 0.00019046395098117983,
+ "loss": 1.4723,
+ "step": 420
+ },
+ {
+ "epoch": 0.42111843956112655,
+ "grad_norm": 0.6144593358039856,
+ "learning_rate": 0.00019041922739850616,
+ "loss": 1.5502,
+ "step": 421
+ },
+ {
+ "epoch": 0.4221187208902504,
+ "grad_norm": 0.61333167552948,
+ "learning_rate": 0.00019037440446145385,
+ "loss": 1.3283,
+ "step": 422
+ },
+ {
+ "epoch": 0.4231190022193742,
+ "grad_norm": 0.5881702303886414,
+ "learning_rate": 0.00019032948221927524,
+ "loss": 1.4206,
+ "step": 423
+ },
+ {
+ "epoch": 0.42411928354849804,
+ "grad_norm": 0.5334322452545166,
+ "learning_rate": 0.00019028446072133175,
+ "loss": 1.4603,
+ "step": 424
+ },
+ {
+ "epoch": 0.4251195648776218,
+ "grad_norm": 0.5730605721473694,
+ "learning_rate": 0.00019023934001709383,
+ "loss": 1.4375,
+ "step": 425
+ },
+ {
+ "epoch": 0.42611984620674564,
+ "grad_norm": 0.6227820515632629,
+ "learning_rate": 0.00019019412015614098,
+ "loss": 1.4888,
+ "step": 426
+ },
+ {
+ "epoch": 0.4271201275358695,
+ "grad_norm": 0.5811313390731812,
+ "learning_rate": 0.00019014880118816164,
+ "loss": 1.3492,
+ "step": 427
+ },
+ {
+ "epoch": 0.4281204088649933,
+ "grad_norm": 0.5685800313949585,
+ "learning_rate": 0.0001901033831629532,
+ "loss": 1.5052,
+ "step": 428
+ },
+ {
+ "epoch": 0.4291206901941171,
+ "grad_norm": 0.5961394309997559,
+ "learning_rate": 0.00019005786613042185,
+ "loss": 1.3324,
+ "step": 429
+ },
+ {
+ "epoch": 0.4301209715232409,
+ "grad_norm": 0.5845314860343933,
+ "learning_rate": 0.00019001225014058255,
+ "loss": 1.5733,
+ "step": 430
+ },
+ {
+ "epoch": 0.43112125285236474,
+ "grad_norm": 0.5400176048278809,
+ "learning_rate": 0.00018996653524355902,
+ "loss": 1.3973,
+ "step": 431
+ },
+ {
+ "epoch": 0.4321215341814885,
+ "grad_norm": 0.5462201833724976,
+ "learning_rate": 0.00018992072148958368,
+ "loss": 1.2167,
+ "step": 432
+ },
+ {
+ "epoch": 0.43312181551061235,
+ "grad_norm": 0.6200360059738159,
+ "learning_rate": 0.00018987480892899758,
+ "loss": 1.5596,
+ "step": 433
+ },
+ {
+ "epoch": 0.4341220968397362,
+ "grad_norm": 0.5230718851089478,
+ "learning_rate": 0.00018982879761225027,
+ "loss": 1.3661,
+ "step": 434
+ },
+ {
+ "epoch": 0.43512237816886,
+ "grad_norm": 0.5868643522262573,
+ "learning_rate": 0.00018978268758989991,
+ "loss": 1.4792,
+ "step": 435
+ },
+ {
+ "epoch": 0.4361226594979838,
+ "grad_norm": 0.580892026424408,
+ "learning_rate": 0.00018973647891261307,
+ "loss": 1.3275,
+ "step": 436
+ },
+ {
+ "epoch": 0.4371229408271076,
+ "grad_norm": 0.5903263688087463,
+ "learning_rate": 0.00018969017163116472,
+ "loss": 1.4721,
+ "step": 437
+ },
+ {
+ "epoch": 0.43812322215623145,
+ "grad_norm": 0.5108968019485474,
+ "learning_rate": 0.0001896437657964382,
+ "loss": 1.3785,
+ "step": 438
+ },
+ {
+ "epoch": 0.4391235034853553,
+ "grad_norm": 0.6707500219345093,
+ "learning_rate": 0.00018959726145942508,
+ "loss": 1.5033,
+ "step": 439
+ },
+ {
+ "epoch": 0.44012378481447906,
+ "grad_norm": 0.5793184638023376,
+ "learning_rate": 0.00018955065867122528,
+ "loss": 1.3629,
+ "step": 440
+ },
+ {
+ "epoch": 0.4411240661436029,
+ "grad_norm": 0.5549041628837585,
+ "learning_rate": 0.00018950395748304678,
+ "loss": 1.5557,
+ "step": 441
+ },
+ {
+ "epoch": 0.4421243474727267,
+ "grad_norm": 0.5406919121742249,
+ "learning_rate": 0.0001894571579462058,
+ "loss": 1.4441,
+ "step": 442
+ },
+ {
+ "epoch": 0.4431246288018505,
+ "grad_norm": 0.5131089091300964,
+ "learning_rate": 0.00018941026011212654,
+ "loss": 1.3051,
+ "step": 443
+ },
+ {
+ "epoch": 0.4441249101309743,
+ "grad_norm": 0.601586639881134,
+ "learning_rate": 0.00018936326403234125,
+ "loss": 1.5297,
+ "step": 444
+ },
+ {
+ "epoch": 0.44512519146009816,
+ "grad_norm": 0.5036457180976868,
+ "learning_rate": 0.00018931616975849006,
+ "loss": 1.357,
+ "step": 445
+ },
+ {
+ "epoch": 0.446125472789222,
+ "grad_norm": 0.5471266508102417,
+ "learning_rate": 0.00018926897734232115,
+ "loss": 1.2176,
+ "step": 446
+ },
+ {
+ "epoch": 0.44712575411834576,
+ "grad_norm": 0.6057867407798767,
+ "learning_rate": 0.0001892216868356904,
+ "loss": 1.4763,
+ "step": 447
+ },
+ {
+ "epoch": 0.4481260354474696,
+ "grad_norm": 0.5384593605995178,
+ "learning_rate": 0.0001891742982905615,
+ "loss": 1.513,
+ "step": 448
+ },
+ {
+ "epoch": 0.4491263167765934,
+ "grad_norm": 0.6144880056381226,
+ "learning_rate": 0.00018912681175900598,
+ "loss": 1.5782,
+ "step": 449
+ },
+ {
+ "epoch": 0.45012659810571726,
+ "grad_norm": 0.4838174879550934,
+ "learning_rate": 0.00018907922729320285,
+ "loss": 1.4085,
+ "step": 450
+ },
+ {
+ "epoch": 0.45112687943484103,
+ "grad_norm": 0.6852928400039673,
+ "learning_rate": 0.00018903154494543889,
+ "loss": 1.5989,
+ "step": 451
+ },
+ {
+ "epoch": 0.45212716076396486,
+ "grad_norm": 0.47527411580085754,
+ "learning_rate": 0.00018898376476810834,
+ "loss": 1.3409,
+ "step": 452
+ },
+ {
+ "epoch": 0.4531274420930887,
+ "grad_norm": 0.5665884613990784,
+ "learning_rate": 0.00018893588681371303,
+ "loss": 1.5395,
+ "step": 453
+ },
+ {
+ "epoch": 0.45412772342221247,
+ "grad_norm": 0.5792158246040344,
+ "learning_rate": 0.00018888791113486213,
+ "loss": 1.516,
+ "step": 454
+ },
+ {
+ "epoch": 0.4551280047513363,
+ "grad_norm": 0.5223523378372192,
+ "learning_rate": 0.00018883983778427227,
+ "loss": 1.3678,
+ "step": 455
+ },
+ {
+ "epoch": 0.45612828608046013,
+ "grad_norm": 0.5927590131759644,
+ "learning_rate": 0.0001887916668147673,
+ "loss": 1.3617,
+ "step": 456
+ },
+ {
+ "epoch": 0.45712856740958396,
+ "grad_norm": 0.7266496419906616,
+ "learning_rate": 0.00018874339827927846,
+ "loss": 1.3734,
+ "step": 457
+ },
+ {
+ "epoch": 0.45812884873870774,
+ "grad_norm": 0.6495805978775024,
+ "learning_rate": 0.00018869503223084414,
+ "loss": 1.5282,
+ "step": 458
+ },
+ {
+ "epoch": 0.45912913006783157,
+ "grad_norm": 0.6099816560745239,
+ "learning_rate": 0.00018864656872260985,
+ "loss": 1.4691,
+ "step": 459
+ },
+ {
+ "epoch": 0.4601294113969554,
+ "grad_norm": 0.5208227038383484,
+ "learning_rate": 0.00018859800780782828,
+ "loss": 1.3949,
+ "step": 460
+ },
+ {
+ "epoch": 0.46112969272607923,
+ "grad_norm": 0.5526600480079651,
+ "learning_rate": 0.000188549349539859,
+ "loss": 1.3557,
+ "step": 461
+ },
+ {
+ "epoch": 0.462129974055203,
+ "grad_norm": 0.5537740588188171,
+ "learning_rate": 0.00018850059397216876,
+ "loss": 1.4703,
+ "step": 462
+ },
+ {
+ "epoch": 0.46313025538432684,
+ "grad_norm": 0.5553976893424988,
+ "learning_rate": 0.00018845174115833099,
+ "loss": 1.4356,
+ "step": 463
+ },
+ {
+ "epoch": 0.46413053671345067,
+ "grad_norm": 0.6027779579162598,
+ "learning_rate": 0.0001884027911520262,
+ "loss": 1.4763,
+ "step": 464
+ },
+ {
+ "epoch": 0.4651308180425745,
+ "grad_norm": 0.5559154748916626,
+ "learning_rate": 0.00018835374400704154,
+ "loss": 1.4148,
+ "step": 465
+ },
+ {
+ "epoch": 0.4661310993716983,
+ "grad_norm": 0.6124109029769897,
+ "learning_rate": 0.00018830459977727096,
+ "loss": 1.4468,
+ "step": 466
+ },
+ {
+ "epoch": 0.4671313807008221,
+ "grad_norm": 0.4762580692768097,
+ "learning_rate": 0.0001882553585167151,
+ "loss": 1.3714,
+ "step": 467
+ },
+ {
+ "epoch": 0.46813166202994594,
+ "grad_norm": 0.5793487429618835,
+ "learning_rate": 0.00018820602027948114,
+ "loss": 1.4828,
+ "step": 468
+ },
+ {
+ "epoch": 0.4691319433590697,
+ "grad_norm": 0.55177241563797,
+ "learning_rate": 0.00018815658511978298,
+ "loss": 1.4157,
+ "step": 469
+ },
+ {
+ "epoch": 0.47013222468819355,
+ "grad_norm": 0.5065292716026306,
+ "learning_rate": 0.00018810705309194083,
+ "loss": 1.4519,
+ "step": 470
+ },
+ {
+ "epoch": 0.4711325060173174,
+ "grad_norm": 0.5401413440704346,
+ "learning_rate": 0.00018805742425038145,
+ "loss": 1.4344,
+ "step": 471
+ },
+ {
+ "epoch": 0.4721327873464412,
+ "grad_norm": 0.7173880338668823,
+ "learning_rate": 0.00018800769864963802,
+ "loss": 1.7325,
+ "step": 472
+ },
+ {
+ "epoch": 0.473133068675565,
+ "grad_norm": 0.507682204246521,
+ "learning_rate": 0.00018795787634434994,
+ "loss": 1.37,
+ "step": 473
+ },
+ {
+ "epoch": 0.4741333500046888,
+ "grad_norm": 0.551888644695282,
+ "learning_rate": 0.0001879079573892629,
+ "loss": 1.3695,
+ "step": 474
+ },
+ {
+ "epoch": 0.47513363133381264,
+ "grad_norm": 0.5109260082244873,
+ "learning_rate": 0.00018785794183922883,
+ "loss": 1.4001,
+ "step": 475
+ },
+ {
+ "epoch": 0.4761339126629365,
+ "grad_norm": 0.4565551280975342,
+ "learning_rate": 0.00018780782974920572,
+ "loss": 1.1752,
+ "step": 476
+ },
+ {
+ "epoch": 0.47713419399206025,
+ "grad_norm": 0.5651509761810303,
+ "learning_rate": 0.00018775762117425777,
+ "loss": 1.4291,
+ "step": 477
+ },
+ {
+ "epoch": 0.4781344753211841,
+ "grad_norm": 0.5827792286872864,
+ "learning_rate": 0.0001877073161695551,
+ "loss": 1.3438,
+ "step": 478
+ },
+ {
+ "epoch": 0.4791347566503079,
+ "grad_norm": 0.5719752907752991,
+ "learning_rate": 0.00018765691479037376,
+ "loss": 1.4683,
+ "step": 479
+ },
+ {
+ "epoch": 0.4801350379794317,
+ "grad_norm": 0.5153111815452576,
+ "learning_rate": 0.00018760641709209583,
+ "loss": 1.4392,
+ "step": 480
+ },
+ {
+ "epoch": 0.4811353193085555,
+ "grad_norm": 0.5455904603004456,
+ "learning_rate": 0.0001875558231302091,
+ "loss": 1.1603,
+ "step": 481
+ },
+ {
+ "epoch": 0.48213560063767935,
+ "grad_norm": 0.5857074856758118,
+ "learning_rate": 0.00018750513296030718,
+ "loss": 1.3099,
+ "step": 482
+ },
+ {
+ "epoch": 0.4831358819668032,
+ "grad_norm": 0.6051676273345947,
+ "learning_rate": 0.00018745434663808942,
+ "loss": 1.3587,
+ "step": 483
+ },
+ {
+ "epoch": 0.48413616329592696,
+ "grad_norm": 0.588749885559082,
+ "learning_rate": 0.0001874034642193608,
+ "loss": 1.5277,
+ "step": 484
+ },
+ {
+ "epoch": 0.4851364446250508,
+ "grad_norm": 0.5295410752296448,
+ "learning_rate": 0.0001873524857600319,
+ "loss": 1.2084,
+ "step": 485
+ },
+ {
+ "epoch": 0.4861367259541746,
+ "grad_norm": 0.5313368439674377,
+ "learning_rate": 0.00018730141131611882,
+ "loss": 1.4002,
+ "step": 486
+ },
+ {
+ "epoch": 0.48713700728329845,
+ "grad_norm": 0.5166353583335876,
+ "learning_rate": 0.00018725024094374315,
+ "loss": 1.208,
+ "step": 487
+ },
+ {
+ "epoch": 0.4881372886124222,
+ "grad_norm": 0.5478363037109375,
+ "learning_rate": 0.00018719897469913184,
+ "loss": 1.3236,
+ "step": 488
+ },
+ {
+ "epoch": 0.48913756994154606,
+ "grad_norm": 0.5531913042068481,
+ "learning_rate": 0.00018714761263861728,
+ "loss": 1.4938,
+ "step": 489
+ },
+ {
+ "epoch": 0.4901378512706699,
+ "grad_norm": 0.5334530472755432,
+ "learning_rate": 0.000187096154818637,
+ "loss": 1.4172,
+ "step": 490
+ },
+ {
+ "epoch": 0.4911381325997937,
+ "grad_norm": 0.5667001605033875,
+ "learning_rate": 0.00018704460129573391,
+ "loss": 1.3517,
+ "step": 491
+ },
+ {
+ "epoch": 0.4921384139289175,
+ "grad_norm": 0.5568780303001404,
+ "learning_rate": 0.00018699295212655596,
+ "loss": 1.4287,
+ "step": 492
+ },
+ {
+ "epoch": 0.4931386952580413,
+ "grad_norm": 0.6663610935211182,
+ "learning_rate": 0.00018694120736785632,
+ "loss": 1.5416,
+ "step": 493
+ },
+ {
+ "epoch": 0.49413897658716516,
+ "grad_norm": 0.5753045082092285,
+ "learning_rate": 0.00018688936707649304,
+ "loss": 1.5552,
+ "step": 494
+ },
+ {
+ "epoch": 0.49513925791628893,
+ "grad_norm": 0.5707410573959351,
+ "learning_rate": 0.00018683743130942928,
+ "loss": 1.5332,
+ "step": 495
+ },
+ {
+ "epoch": 0.49613953924541276,
+ "grad_norm": 0.5847951173782349,
+ "learning_rate": 0.00018678540012373302,
+ "loss": 1.3488,
+ "step": 496
+ },
+ {
+ "epoch": 0.4971398205745366,
+ "grad_norm": 0.60503751039505,
+ "learning_rate": 0.00018673327357657715,
+ "loss": 1.3924,
+ "step": 497
+ },
+ {
+ "epoch": 0.4981401019036604,
+ "grad_norm": 0.635142982006073,
+ "learning_rate": 0.0001866810517252393,
+ "loss": 1.4392,
+ "step": 498
+ },
+ {
+ "epoch": 0.4991403832327842,
+ "grad_norm": 0.5536782741546631,
+ "learning_rate": 0.00018662873462710184,
+ "loss": 1.286,
+ "step": 499
+ },
+ {
+ "epoch": 0.5001406645619081,
+ "grad_norm": 0.5676659345626831,
+ "learning_rate": 0.0001865763223396518,
+ "loss": 1.3006,
+ "step": 500
+ },
+ {
+ "epoch": 0.5011409458910319,
+ "grad_norm": 0.5546663403511047,
+ "learning_rate": 0.00018652381492048083,
+ "loss": 1.418,
+ "step": 501
+ },
+ {
+ "epoch": 0.5021412272201556,
+ "grad_norm": 0.5137162804603577,
+ "learning_rate": 0.00018647121242728506,
+ "loss": 1.3173,
+ "step": 502
+ },
+ {
+ "epoch": 0.5031415085492795,
+ "grad_norm": 0.5474348068237305,
+ "learning_rate": 0.00018641851491786512,
+ "loss": 1.6652,
+ "step": 503
+ },
+ {
+ "epoch": 0.5041417898784033,
+ "grad_norm": 0.5563383102416992,
+ "learning_rate": 0.00018636572245012606,
+ "loss": 1.4519,
+ "step": 504
+ },
+ {
+ "epoch": 0.5051420712075271,
+ "grad_norm": 0.5621083974838257,
+ "learning_rate": 0.00018631283508207725,
+ "loss": 1.5418,
+ "step": 505
+ },
+ {
+ "epoch": 0.506142352536651,
+ "grad_norm": 0.49915972352027893,
+ "learning_rate": 0.00018625985287183233,
+ "loss": 1.2969,
+ "step": 506
+ },
+ {
+ "epoch": 0.5071426338657747,
+ "grad_norm": 0.601996660232544,
+ "learning_rate": 0.00018620677587760916,
+ "loss": 1.4483,
+ "step": 507
+ },
+ {
+ "epoch": 0.5081429151948985,
+ "grad_norm": 0.5594652891159058,
+ "learning_rate": 0.00018615360415772978,
+ "loss": 1.4094,
+ "step": 508
+ },
+ {
+ "epoch": 0.5091431965240224,
+ "grad_norm": 0.557381808757782,
+ "learning_rate": 0.00018610033777062025,
+ "loss": 1.216,
+ "step": 509
+ },
+ {
+ "epoch": 0.5101434778531462,
+ "grad_norm": 0.5841740369796753,
+ "learning_rate": 0.0001860469767748108,
+ "loss": 1.4924,
+ "step": 510
+ },
+ {
+ "epoch": 0.5111437591822701,
+ "grad_norm": 0.4968324899673462,
+ "learning_rate": 0.00018599352122893539,
+ "loss": 1.2474,
+ "step": 511
+ },
+ {
+ "epoch": 0.5121440405113938,
+ "grad_norm": 0.5390318632125854,
+ "learning_rate": 0.00018593997119173205,
+ "loss": 1.4484,
+ "step": 512
+ },
+ {
+ "epoch": 0.5131443218405176,
+ "grad_norm": 0.6626128554344177,
+ "learning_rate": 0.00018588632672204264,
+ "loss": 1.5664,
+ "step": 513
+ },
+ {
+ "epoch": 0.5141446031696415,
+ "grad_norm": 0.6183133721351624,
+ "learning_rate": 0.0001858325878788126,
+ "loss": 1.5603,
+ "step": 514
+ },
+ {
+ "epoch": 0.5151448844987653,
+ "grad_norm": 0.5574773550033569,
+ "learning_rate": 0.00018577875472109134,
+ "loss": 1.3668,
+ "step": 515
+ },
+ {
+ "epoch": 0.516145165827889,
+ "grad_norm": 0.5127518773078918,
+ "learning_rate": 0.0001857248273080317,
+ "loss": 1.264,
+ "step": 516
+ },
+ {
+ "epoch": 0.5171454471570129,
+ "grad_norm": 0.6540619134902954,
+ "learning_rate": 0.00018567080569889015,
+ "loss": 1.3091,
+ "step": 517
+ },
+ {
+ "epoch": 0.5181457284861367,
+ "grad_norm": 0.5286336541175842,
+ "learning_rate": 0.00018561668995302667,
+ "loss": 1.3581,
+ "step": 518
+ },
+ {
+ "epoch": 0.5191460098152605,
+ "grad_norm": 0.6609972715377808,
+ "learning_rate": 0.00018556248012990468,
+ "loss": 1.3123,
+ "step": 519
+ },
+ {
+ "epoch": 0.5201462911443844,
+ "grad_norm": 0.48230236768722534,
+ "learning_rate": 0.000185508176289091,
+ "loss": 1.2372,
+ "step": 520
+ },
+ {
+ "epoch": 0.5211465724735082,
+ "grad_norm": 0.5173765420913696,
+ "learning_rate": 0.00018545377849025566,
+ "loss": 1.327,
+ "step": 521
+ },
+ {
+ "epoch": 0.522146853802632,
+ "grad_norm": 0.5822583436965942,
+ "learning_rate": 0.0001853992867931721,
+ "loss": 1.3851,
+ "step": 522
+ },
+ {
+ "epoch": 0.5231471351317558,
+ "grad_norm": 0.6025621891021729,
+ "learning_rate": 0.00018534470125771674,
+ "loss": 1.5627,
+ "step": 523
+ },
+ {
+ "epoch": 0.5241474164608796,
+ "grad_norm": 0.5516778230667114,
+ "learning_rate": 0.0001852900219438693,
+ "loss": 1.4036,
+ "step": 524
+ },
+ {
+ "epoch": 0.5251476977900035,
+ "grad_norm": 0.5738380551338196,
+ "learning_rate": 0.0001852352489117124,
+ "loss": 1.5042,
+ "step": 525
+ },
+ {
+ "epoch": 0.5261479791191273,
+ "grad_norm": 0.6360776424407959,
+ "learning_rate": 0.00018518038222143174,
+ "loss": 1.4101,
+ "step": 526
+ },
+ {
+ "epoch": 0.527148260448251,
+ "grad_norm": 0.5776675939559937,
+ "learning_rate": 0.00018512542193331583,
+ "loss": 1.6015,
+ "step": 527
+ },
+ {
+ "epoch": 0.5281485417773749,
+ "grad_norm": 0.5662726759910583,
+ "learning_rate": 0.00018507036810775615,
+ "loss": 1.3186,
+ "step": 528
+ },
+ {
+ "epoch": 0.5291488231064987,
+ "grad_norm": 0.6518335938453674,
+ "learning_rate": 0.00018501522080524688,
+ "loss": 1.4882,
+ "step": 529
+ },
+ {
+ "epoch": 0.5301491044356225,
+ "grad_norm": 0.5475590825080872,
+ "learning_rate": 0.0001849599800863849,
+ "loss": 1.487,
+ "step": 530
+ },
+ {
+ "epoch": 0.5311493857647464,
+ "grad_norm": 0.6275209188461304,
+ "learning_rate": 0.0001849046460118698,
+ "loss": 1.3563,
+ "step": 531
+ },
+ {
+ "epoch": 0.5321496670938701,
+ "grad_norm": 0.5629132390022278,
+ "learning_rate": 0.0001848492186425037,
+ "loss": 1.516,
+ "step": 532
+ },
+ {
+ "epoch": 0.533149948422994,
+ "grad_norm": 0.5251057744026184,
+ "learning_rate": 0.0001847936980391913,
+ "loss": 1.5254,
+ "step": 533
+ },
+ {
+ "epoch": 0.5341502297521178,
+ "grad_norm": 0.5635396838188171,
+ "learning_rate": 0.00018473808426293964,
+ "loss": 1.3408,
+ "step": 534
+ },
+ {
+ "epoch": 0.5351505110812416,
+ "grad_norm": 0.527082622051239,
+ "learning_rate": 0.00018468237737485823,
+ "loss": 1.2664,
+ "step": 535
+ },
+ {
+ "epoch": 0.5361507924103655,
+ "grad_norm": 0.6555044054985046,
+ "learning_rate": 0.00018462657743615888,
+ "loss": 1.464,
+ "step": 536
+ },
+ {
+ "epoch": 0.5371510737394892,
+ "grad_norm": 0.5468676686286926,
+ "learning_rate": 0.00018457068450815562,
+ "loss": 1.3733,
+ "step": 537
+ },
+ {
+ "epoch": 0.538151355068613,
+ "grad_norm": 0.5662835836410522,
+ "learning_rate": 0.00018451469865226464,
+ "loss": 1.509,
+ "step": 538
+ },
+ {
+ "epoch": 0.5391516363977369,
+ "grad_norm": 0.5553548336029053,
+ "learning_rate": 0.00018445861993000436,
+ "loss": 1.2476,
+ "step": 539
+ },
+ {
+ "epoch": 0.5401519177268607,
+ "grad_norm": 0.6240925192832947,
+ "learning_rate": 0.00018440244840299506,
+ "loss": 1.5835,
+ "step": 540
+ },
+ {
+ "epoch": 0.5411521990559846,
+ "grad_norm": 0.6107541918754578,
+ "learning_rate": 0.0001843461841329591,
+ "loss": 1.7176,
+ "step": 541
+ },
+ {
+ "epoch": 0.5421524803851083,
+ "grad_norm": 0.6990326642990112,
+ "learning_rate": 0.0001842898271817208,
+ "loss": 1.4235,
+ "step": 542
+ },
+ {
+ "epoch": 0.5431527617142321,
+ "grad_norm": 0.583871603012085,
+ "learning_rate": 0.00018423337761120618,
+ "loss": 1.5283,
+ "step": 543
+ },
+ {
+ "epoch": 0.544153043043356,
+ "grad_norm": 0.5585455894470215,
+ "learning_rate": 0.00018417683548344318,
+ "loss": 1.4875,
+ "step": 544
+ },
+ {
+ "epoch": 0.5451533243724798,
+ "grad_norm": 0.5199955701828003,
+ "learning_rate": 0.00018412020086056133,
+ "loss": 1.3989,
+ "step": 545
+ },
+ {
+ "epoch": 0.5461536057016035,
+ "grad_norm": 0.5517343878746033,
+ "learning_rate": 0.0001840634738047918,
+ "loss": 1.4073,
+ "step": 546
+ },
+ {
+ "epoch": 0.5471538870307274,
+ "grad_norm": 0.7140716314315796,
+ "learning_rate": 0.0001840066543784675,
+ "loss": 1.4477,
+ "step": 547
+ },
+ {
+ "epoch": 0.5481541683598512,
+ "grad_norm": 0.548422634601593,
+ "learning_rate": 0.00018394974264402257,
+ "loss": 1.4198,
+ "step": 548
+ },
+ {
+ "epoch": 0.549154449688975,
+ "grad_norm": 0.5907624363899231,
+ "learning_rate": 0.00018389273866399275,
+ "loss": 1.4033,
+ "step": 549
+ },
+ {
+ "epoch": 0.5501547310180989,
+ "grad_norm": 0.5327603220939636,
+ "learning_rate": 0.00018383564250101512,
+ "loss": 1.2674,
+ "step": 550
+ },
+ {
+ "epoch": 0.5511550123472226,
+ "grad_norm": 0.4678132236003876,
+ "learning_rate": 0.000183778454217828,
+ "loss": 1.3644,
+ "step": 551
+ },
+ {
+ "epoch": 0.5521552936763465,
+ "grad_norm": 0.674040675163269,
+ "learning_rate": 0.0001837211738772711,
+ "loss": 1.6942,
+ "step": 552
+ },
+ {
+ "epoch": 0.5531555750054703,
+ "grad_norm": 0.5374539494514465,
+ "learning_rate": 0.000183663801542285,
+ "loss": 1.1887,
+ "step": 553
+ },
+ {
+ "epoch": 0.5541558563345941,
+ "grad_norm": 0.5528072118759155,
+ "learning_rate": 0.00018360633727591155,
+ "loss": 1.2,
+ "step": 554
+ },
+ {
+ "epoch": 0.555156137663718,
+ "grad_norm": 0.6597411632537842,
+ "learning_rate": 0.00018354878114129367,
+ "loss": 1.402,
+ "step": 555
+ },
+ {
+ "epoch": 0.5561564189928417,
+ "grad_norm": 0.5931501388549805,
+ "learning_rate": 0.00018349113320167504,
+ "loss": 1.5583,
+ "step": 556
+ },
+ {
+ "epoch": 0.5571567003219655,
+ "grad_norm": 0.6331121921539307,
+ "learning_rate": 0.00018343339352040042,
+ "loss": 1.7882,
+ "step": 557
+ },
+ {
+ "epoch": 0.5581569816510894,
+ "grad_norm": 0.5221824645996094,
+ "learning_rate": 0.00018337556216091517,
+ "loss": 1.2457,
+ "step": 558
+ },
+ {
+ "epoch": 0.5591572629802132,
+ "grad_norm": 0.6008853912353516,
+ "learning_rate": 0.00018331763918676556,
+ "loss": 1.5916,
+ "step": 559
+ },
+ {
+ "epoch": 0.560157544309337,
+ "grad_norm": 0.5409006476402283,
+ "learning_rate": 0.00018325962466159848,
+ "loss": 1.3457,
+ "step": 560
+ },
+ {
+ "epoch": 0.5611578256384608,
+ "grad_norm": 0.5095859169960022,
+ "learning_rate": 0.00018320151864916135,
+ "loss": 1.3622,
+ "step": 561
+ },
+ {
+ "epoch": 0.5621581069675846,
+ "grad_norm": 0.5716331005096436,
+ "learning_rate": 0.00018314332121330225,
+ "loss": 1.6168,
+ "step": 562
+ },
+ {
+ "epoch": 0.5631583882967085,
+ "grad_norm": 0.600307047367096,
+ "learning_rate": 0.0001830850324179695,
+ "loss": 1.4117,
+ "step": 563
+ },
+ {
+ "epoch": 0.5641586696258323,
+ "grad_norm": 0.7528484463691711,
+ "learning_rate": 0.00018302665232721208,
+ "loss": 1.3418,
+ "step": 564
+ },
+ {
+ "epoch": 0.565158950954956,
+ "grad_norm": 0.6119087338447571,
+ "learning_rate": 0.0001829681810051791,
+ "loss": 1.4908,
+ "step": 565
+ },
+ {
+ "epoch": 0.5661592322840799,
+ "grad_norm": 0.6440190672874451,
+ "learning_rate": 0.00018290961851611995,
+ "loss": 1.3511,
+ "step": 566
+ },
+ {
+ "epoch": 0.5671595136132037,
+ "grad_norm": 0.647294282913208,
+ "learning_rate": 0.00018285096492438424,
+ "loss": 1.5165,
+ "step": 567
+ },
+ {
+ "epoch": 0.5681597949423275,
+ "grad_norm": 0.5499668717384338,
+ "learning_rate": 0.00018279222029442163,
+ "loss": 1.2876,
+ "step": 568
+ },
+ {
+ "epoch": 0.5691600762714514,
+ "grad_norm": 0.5629482865333557,
+ "learning_rate": 0.00018273338469078186,
+ "loss": 1.2256,
+ "step": 569
+ },
+ {
+ "epoch": 0.5701603576005752,
+ "grad_norm": 0.48661288619041443,
+ "learning_rate": 0.00018267445817811466,
+ "loss": 1.44,
+ "step": 570
+ },
+ {
+ "epoch": 0.5711606389296989,
+ "grad_norm": 0.5713567733764648,
+ "learning_rate": 0.00018261544082116954,
+ "loss": 1.741,
+ "step": 571
+ },
+ {
+ "epoch": 0.5721609202588228,
+ "grad_norm": 0.6130850315093994,
+ "learning_rate": 0.00018255633268479595,
+ "loss": 1.526,
+ "step": 572
+ },
+ {
+ "epoch": 0.5731612015879466,
+ "grad_norm": 0.5415536761283875,
+ "learning_rate": 0.00018249713383394303,
+ "loss": 1.2405,
+ "step": 573
+ },
+ {
+ "epoch": 0.5741614829170705,
+ "grad_norm": 0.600574791431427,
+ "learning_rate": 0.0001824378443336596,
+ "loss": 1.4534,
+ "step": 574
+ },
+ {
+ "epoch": 0.5751617642461943,
+ "grad_norm": 0.5479387044906616,
+ "learning_rate": 0.00018237846424909413,
+ "loss": 1.4277,
+ "step": 575
+ },
+ {
+ "epoch": 0.576162045575318,
+ "grad_norm": 0.5536132454872131,
+ "learning_rate": 0.00018231899364549455,
+ "loss": 1.3918,
+ "step": 576
+ },
+ {
+ "epoch": 0.5771623269044419,
+ "grad_norm": 0.6228598356246948,
+ "learning_rate": 0.00018225943258820833,
+ "loss": 1.413,
+ "step": 577
+ },
+ {
+ "epoch": 0.5781626082335657,
+ "grad_norm": 0.5498123168945312,
+ "learning_rate": 0.00018219978114268227,
+ "loss": 1.3558,
+ "step": 578
+ },
+ {
+ "epoch": 0.5791628895626895,
+ "grad_norm": 0.5427498817443848,
+ "learning_rate": 0.00018214003937446253,
+ "loss": 1.509,
+ "step": 579
+ },
+ {
+ "epoch": 0.5801631708918134,
+ "grad_norm": 0.522285521030426,
+ "learning_rate": 0.00018208020734919455,
+ "loss": 1.3847,
+ "step": 580
+ },
+ {
+ "epoch": 0.5811634522209371,
+ "grad_norm": 0.5963860750198364,
+ "learning_rate": 0.00018202028513262288,
+ "loss": 1.4605,
+ "step": 581
+ },
+ {
+ "epoch": 0.5821637335500609,
+ "grad_norm": 0.4854499101638794,
+ "learning_rate": 0.00018196027279059117,
+ "loss": 1.4968,
+ "step": 582
+ },
+ {
+ "epoch": 0.5831640148791848,
+ "grad_norm": 0.503466010093689,
+ "learning_rate": 0.00018190017038904215,
+ "loss": 1.2568,
+ "step": 583
+ },
+ {
+ "epoch": 0.5841642962083086,
+ "grad_norm": 0.6027483940124512,
+ "learning_rate": 0.0001818399779940175,
+ "loss": 1.5744,
+ "step": 584
+ },
+ {
+ "epoch": 0.5851645775374325,
+ "grad_norm": 0.5450258851051331,
+ "learning_rate": 0.0001817796956716578,
+ "loss": 1.2672,
+ "step": 585
+ },
+ {
+ "epoch": 0.5861648588665562,
+ "grad_norm": 0.5376724600791931,
+ "learning_rate": 0.00018171932348820234,
+ "loss": 1.5099,
+ "step": 586
+ },
+ {
+ "epoch": 0.58716514019568,
+ "grad_norm": 0.513921856880188,
+ "learning_rate": 0.0001816588615099893,
+ "loss": 1.3213,
+ "step": 587
+ },
+ {
+ "epoch": 0.5881654215248039,
+ "grad_norm": 0.7540159225463867,
+ "learning_rate": 0.00018159830980345548,
+ "loss": 1.2231,
+ "step": 588
+ },
+ {
+ "epoch": 0.5891657028539277,
+ "grad_norm": 0.5917702317237854,
+ "learning_rate": 0.0001815376684351362,
+ "loss": 1.6094,
+ "step": 589
+ },
+ {
+ "epoch": 0.5901659841830514,
+ "grad_norm": 0.5507463216781616,
+ "learning_rate": 0.00018147693747166534,
+ "loss": 1.3904,
+ "step": 590
+ },
+ {
+ "epoch": 0.5911662655121753,
+ "grad_norm": 0.545695960521698,
+ "learning_rate": 0.00018141611697977529,
+ "loss": 1.5172,
+ "step": 591
+ },
+ {
+ "epoch": 0.5921665468412991,
+ "grad_norm": 0.5876530408859253,
+ "learning_rate": 0.00018135520702629675,
+ "loss": 1.3676,
+ "step": 592
+ },
+ {
+ "epoch": 0.5931668281704229,
+ "grad_norm": 0.5510894060134888,
+ "learning_rate": 0.0001812942076781588,
+ "loss": 1.4379,
+ "step": 593
+ },
+ {
+ "epoch": 0.5941671094995468,
+ "grad_norm": 0.5105913877487183,
+ "learning_rate": 0.0001812331190023886,
+ "loss": 1.3687,
+ "step": 594
+ },
+ {
+ "epoch": 0.5951673908286705,
+ "grad_norm": 0.47876060009002686,
+ "learning_rate": 0.0001811719410661116,
+ "loss": 1.3178,
+ "step": 595
+ },
+ {
+ "epoch": 0.5961676721577944,
+ "grad_norm": 0.6079074144363403,
+ "learning_rate": 0.00018111067393655132,
+ "loss": 1.4713,
+ "step": 596
+ },
+ {
+ "epoch": 0.5971679534869182,
+ "grad_norm": 0.5363487601280212,
+ "learning_rate": 0.0001810493176810292,
+ "loss": 1.1868,
+ "step": 597
+ },
+ {
+ "epoch": 0.598168234816042,
+ "grad_norm": 0.5252292156219482,
+ "learning_rate": 0.00018098787236696474,
+ "loss": 1.303,
+ "step": 598
+ },
+ {
+ "epoch": 0.5991685161451659,
+ "grad_norm": 0.5377137064933777,
+ "learning_rate": 0.00018092633806187513,
+ "loss": 1.3653,
+ "step": 599
+ },
+ {
+ "epoch": 0.6001687974742896,
+ "grad_norm": 0.5274302363395691,
+ "learning_rate": 0.0001808647148333755,
+ "loss": 1.3693,
+ "step": 600
+ },
+ {
+ "epoch": 0.6011690788034134,
+ "grad_norm": 0.5664658546447754,
+ "learning_rate": 0.00018080300274917862,
+ "loss": 1.3807,
+ "step": 601
+ },
+ {
+ "epoch": 0.6021693601325373,
+ "grad_norm": 0.6609538197517395,
+ "learning_rate": 0.00018074120187709495,
+ "loss": 1.5015,
+ "step": 602
+ },
+ {
+ "epoch": 0.6031696414616611,
+ "grad_norm": 0.4943195879459381,
+ "learning_rate": 0.00018067931228503246,
+ "loss": 1.4436,
+ "step": 603
+ },
+ {
+ "epoch": 0.604169922790785,
+ "grad_norm": 0.549712598323822,
+ "learning_rate": 0.00018061733404099655,
+ "loss": 1.455,
+ "step": 604
+ },
+ {
+ "epoch": 0.6051702041199087,
+ "grad_norm": 0.5765941143035889,
+ "learning_rate": 0.00018055526721309016,
+ "loss": 1.3317,
+ "step": 605
+ },
+ {
+ "epoch": 0.6061704854490325,
+ "grad_norm": 0.5223068594932556,
+ "learning_rate": 0.0001804931118695135,
+ "loss": 1.3456,
+ "step": 606
+ },
+ {
+ "epoch": 0.6071707667781564,
+ "grad_norm": 0.5385129451751709,
+ "learning_rate": 0.00018043086807856403,
+ "loss": 1.3388,
+ "step": 607
+ },
+ {
+ "epoch": 0.6081710481072802,
+ "grad_norm": 0.5244528651237488,
+ "learning_rate": 0.00018036853590863648,
+ "loss": 1.398,
+ "step": 608
+ },
+ {
+ "epoch": 0.609171329436404,
+ "grad_norm": 0.5274112224578857,
+ "learning_rate": 0.00018030611542822257,
+ "loss": 1.3105,
+ "step": 609
+ },
+ {
+ "epoch": 0.6101716107655278,
+ "grad_norm": 0.5351893305778503,
+ "learning_rate": 0.00018024360670591114,
+ "loss": 1.3128,
+ "step": 610
+ },
+ {
+ "epoch": 0.6111718920946516,
+ "grad_norm": 0.5729460120201111,
+ "learning_rate": 0.00018018100981038798,
+ "loss": 1.3606,
+ "step": 611
+ },
+ {
+ "epoch": 0.6121721734237754,
+ "grad_norm": 0.5494408011436462,
+ "learning_rate": 0.00018011832481043576,
+ "loss": 1.4517,
+ "step": 612
+ },
+ {
+ "epoch": 0.6131724547528993,
+ "grad_norm": 0.5205882787704468,
+ "learning_rate": 0.00018005555177493394,
+ "loss": 1.4943,
+ "step": 613
+ },
+ {
+ "epoch": 0.614172736082023,
+ "grad_norm": 0.5488479137420654,
+ "learning_rate": 0.00017999269077285875,
+ "loss": 1.3939,
+ "step": 614
+ },
+ {
+ "epoch": 0.6151730174111469,
+ "grad_norm": 0.5779786109924316,
+ "learning_rate": 0.00017992974187328305,
+ "loss": 1.5744,
+ "step": 615
+ },
+ {
+ "epoch": 0.6161732987402707,
+ "grad_norm": 0.5576769113540649,
+ "learning_rate": 0.00017986670514537627,
+ "loss": 1.2284,
+ "step": 616
+ },
+ {
+ "epoch": 0.6171735800693945,
+ "grad_norm": 0.4912784993648529,
+ "learning_rate": 0.00017980358065840444,
+ "loss": 1.292,
+ "step": 617
+ },
+ {
+ "epoch": 0.6181738613985184,
+ "grad_norm": 0.657666027545929,
+ "learning_rate": 0.0001797403684817299,
+ "loss": 1.4918,
+ "step": 618
+ },
+ {
+ "epoch": 0.6191741427276422,
+ "grad_norm": 0.5642833113670349,
+ "learning_rate": 0.00017967706868481144,
+ "loss": 1.4718,
+ "step": 619
+ },
+ {
+ "epoch": 0.6201744240567659,
+ "grad_norm": 0.7243106961250305,
+ "learning_rate": 0.00017961368133720407,
+ "loss": 1.4342,
+ "step": 620
+ },
+ {
+ "epoch": 0.6211747053858898,
+ "grad_norm": 0.4982456564903259,
+ "learning_rate": 0.000179550206508559,
+ "loss": 1.4478,
+ "step": 621
+ },
+ {
+ "epoch": 0.6221749867150136,
+ "grad_norm": 0.5249592065811157,
+ "learning_rate": 0.00017948664426862364,
+ "loss": 1.485,
+ "step": 622
+ },
+ {
+ "epoch": 0.6231752680441374,
+ "grad_norm": 0.6167681217193604,
+ "learning_rate": 0.00017942299468724134,
+ "loss": 1.4813,
+ "step": 623
+ },
+ {
+ "epoch": 0.6241755493732613,
+ "grad_norm": 0.5300460457801819,
+ "learning_rate": 0.0001793592578343515,
+ "loss": 1.1364,
+ "step": 624
+ },
+ {
+ "epoch": 0.625175830702385,
+ "grad_norm": 0.5908417105674744,
+ "learning_rate": 0.0001792954337799894,
+ "loss": 1.4402,
+ "step": 625
+ },
+ {
+ "epoch": 0.6261761120315089,
+ "grad_norm": 0.5684035420417786,
+ "learning_rate": 0.00017923152259428612,
+ "loss": 1.4847,
+ "step": 626
+ },
+ {
+ "epoch": 0.6271763933606327,
+ "grad_norm": 0.5421493053436279,
+ "learning_rate": 0.00017916752434746856,
+ "loss": 1.3348,
+ "step": 627
+ },
+ {
+ "epoch": 0.6281766746897565,
+ "grad_norm": 0.5295160412788391,
+ "learning_rate": 0.0001791034391098591,
+ "loss": 1.4703,
+ "step": 628
+ },
+ {
+ "epoch": 0.6291769560188804,
+ "grad_norm": 0.5196051001548767,
+ "learning_rate": 0.00017903926695187595,
+ "loss": 1.3478,
+ "step": 629
+ },
+ {
+ "epoch": 0.6301772373480041,
+ "grad_norm": 0.4994469881057739,
+ "learning_rate": 0.0001789750079440326,
+ "loss": 1.2368,
+ "step": 630
+ },
+ {
+ "epoch": 0.6311775186771279,
+ "grad_norm": 0.5117055177688599,
+ "learning_rate": 0.00017891066215693817,
+ "loss": 1.3429,
+ "step": 631
+ },
+ {
+ "epoch": 0.6321778000062518,
+ "grad_norm": 0.49438026547431946,
+ "learning_rate": 0.00017884622966129695,
+ "loss": 1.301,
+ "step": 632
+ },
+ {
+ "epoch": 0.6331780813353756,
+ "grad_norm": 0.6113334894180298,
+ "learning_rate": 0.00017878171052790868,
+ "loss": 1.4636,
+ "step": 633
+ },
+ {
+ "epoch": 0.6341783626644993,
+ "grad_norm": 0.6063141822814941,
+ "learning_rate": 0.00017871710482766817,
+ "loss": 1.2262,
+ "step": 634
+ },
+ {
+ "epoch": 0.6351786439936232,
+ "grad_norm": 0.5604403614997864,
+ "learning_rate": 0.00017865241263156546,
+ "loss": 1.4112,
+ "step": 635
+ },
+ {
+ "epoch": 0.636178925322747,
+ "grad_norm": 0.523415207862854,
+ "learning_rate": 0.0001785876340106855,
+ "loss": 1.3281,
+ "step": 636
+ },
+ {
+ "epoch": 0.6371792066518709,
+ "grad_norm": 0.5602991580963135,
+ "learning_rate": 0.0001785227690362083,
+ "loss": 1.44,
+ "step": 637
+ },
+ {
+ "epoch": 0.6381794879809947,
+ "grad_norm": 0.46946853399276733,
+ "learning_rate": 0.00017845781777940878,
+ "loss": 1.2956,
+ "step": 638
+ },
+ {
+ "epoch": 0.6391797693101184,
+ "grad_norm": 0.5586503744125366,
+ "learning_rate": 0.00017839278031165658,
+ "loss": 1.5419,
+ "step": 639
+ },
+ {
+ "epoch": 0.6401800506392423,
+ "grad_norm": 0.5270752310752869,
+ "learning_rate": 0.00017832765670441612,
+ "loss": 1.305,
+ "step": 640
+ },
+ {
+ "epoch": 0.6411803319683661,
+ "grad_norm": 0.57756108045578,
+ "learning_rate": 0.0001782624470292465,
+ "loss": 1.2145,
+ "step": 641
+ },
+ {
+ "epoch": 0.6421806132974899,
+ "grad_norm": 0.5709058046340942,
+ "learning_rate": 0.0001781971513578013,
+ "loss": 1.4804,
+ "step": 642
+ },
+ {
+ "epoch": 0.6431808946266138,
+ "grad_norm": 0.505849301815033,
+ "learning_rate": 0.00017813176976182873,
+ "loss": 1.3964,
+ "step": 643
+ },
+ {
+ "epoch": 0.6441811759557375,
+ "grad_norm": 0.5171617269515991,
+ "learning_rate": 0.00017806630231317127,
+ "loss": 1.3283,
+ "step": 644
+ },
+ {
+ "epoch": 0.6451814572848613,
+ "grad_norm": 0.5567512512207031,
+ "learning_rate": 0.00017800074908376584,
+ "loss": 1.481,
+ "step": 645
+ },
+ {
+ "epoch": 0.6461817386139852,
+ "grad_norm": 0.5000666379928589,
+ "learning_rate": 0.00017793511014564358,
+ "loss": 1.2856,
+ "step": 646
+ },
+ {
+ "epoch": 0.647182019943109,
+ "grad_norm": 0.49550777673721313,
+ "learning_rate": 0.00017786938557092983,
+ "loss": 1.3447,
+ "step": 647
+ },
+ {
+ "epoch": 0.6481823012722329,
+ "grad_norm": 0.5904624462127686,
+ "learning_rate": 0.00017780357543184397,
+ "loss": 1.241,
+ "step": 648
+ },
+ {
+ "epoch": 0.6491825826013566,
+ "grad_norm": 0.4615901708602905,
+ "learning_rate": 0.00017773767980069945,
+ "loss": 1.3436,
+ "step": 649
+ },
+ {
+ "epoch": 0.6501828639304804,
+ "grad_norm": 0.48083069920539856,
+ "learning_rate": 0.0001776716987499037,
+ "loss": 1.3906,
+ "step": 650
+ },
+ {
+ "epoch": 0.6511831452596043,
+ "grad_norm": 0.4525931775569916,
+ "learning_rate": 0.0001776056323519579,
+ "loss": 1.3417,
+ "step": 651
+ },
+ {
+ "epoch": 0.6521834265887281,
+ "grad_norm": 0.6179555058479309,
+ "learning_rate": 0.00017753948067945712,
+ "loss": 1.3438,
+ "step": 652
+ },
+ {
+ "epoch": 0.6531837079178519,
+ "grad_norm": 0.5525293946266174,
+ "learning_rate": 0.00017747324380509006,
+ "loss": 1.4551,
+ "step": 653
+ },
+ {
+ "epoch": 0.6541839892469757,
+ "grad_norm": 0.533028781414032,
+ "learning_rate": 0.00017740692180163908,
+ "loss": 1.4396,
+ "step": 654
+ },
+ {
+ "epoch": 0.6551842705760995,
+ "grad_norm": 0.5196881890296936,
+ "learning_rate": 0.00017734051474198003,
+ "loss": 1.3032,
+ "step": 655
+ },
+ {
+ "epoch": 0.6561845519052233,
+ "grad_norm": 0.5190469622612,
+ "learning_rate": 0.0001772740226990823,
+ "loss": 1.4049,
+ "step": 656
+ },
+ {
+ "epoch": 0.6571848332343472,
+ "grad_norm": 0.49517175555229187,
+ "learning_rate": 0.00017720744574600863,
+ "loss": 1.3696,
+ "step": 657
+ },
+ {
+ "epoch": 0.658185114563471,
+ "grad_norm": 0.5165138244628906,
+ "learning_rate": 0.00017714078395591502,
+ "loss": 1.3667,
+ "step": 658
+ },
+ {
+ "epoch": 0.6591853958925948,
+ "grad_norm": 0.5624507665634155,
+ "learning_rate": 0.00017707403740205071,
+ "loss": 1.2109,
+ "step": 659
+ },
+ {
+ "epoch": 0.6601856772217186,
+ "grad_norm": 0.45942649245262146,
+ "learning_rate": 0.00017700720615775812,
+ "loss": 1.259,
+ "step": 660
+ },
+ {
+ "epoch": 0.6611859585508424,
+ "grad_norm": 0.5019019842147827,
+ "learning_rate": 0.0001769402902964727,
+ "loss": 1.3739,
+ "step": 661
+ },
+ {
+ "epoch": 0.6621862398799663,
+ "grad_norm": 0.4661652743816376,
+ "learning_rate": 0.00017687328989172288,
+ "loss": 1.2606,
+ "step": 662
+ },
+ {
+ "epoch": 0.66318652120909,
+ "grad_norm": 0.5310545563697815,
+ "learning_rate": 0.00017680620501712996,
+ "loss": 1.3406,
+ "step": 663
+ },
+ {
+ "epoch": 0.6641868025382138,
+ "grad_norm": 0.5190532207489014,
+ "learning_rate": 0.00017673903574640814,
+ "loss": 1.3052,
+ "step": 664
+ },
+ {
+ "epoch": 0.6651870838673377,
+ "grad_norm": 0.5265533328056335,
+ "learning_rate": 0.00017667178215336423,
+ "loss": 1.2326,
+ "step": 665
+ },
+ {
+ "epoch": 0.6661873651964615,
+ "grad_norm": 0.5971291065216064,
+ "learning_rate": 0.0001766044443118978,
+ "loss": 1.4291,
+ "step": 666
+ },
+ {
+ "epoch": 0.6671876465255854,
+ "grad_norm": 0.5295760631561279,
+ "learning_rate": 0.000176537022296001,
+ "loss": 1.2781,
+ "step": 667
+ },
+ {
+ "epoch": 0.6681879278547092,
+ "grad_norm": 0.5124595761299133,
+ "learning_rate": 0.00017646951617975837,
+ "loss": 1.318,
+ "step": 668
+ },
+ {
+ "epoch": 0.6691882091838329,
+ "grad_norm": 0.5968078970909119,
+ "learning_rate": 0.00017640192603734692,
+ "loss": 1.1483,
+ "step": 669
+ },
+ {
+ "epoch": 0.6701884905129568,
+ "grad_norm": 0.6211404204368591,
+ "learning_rate": 0.00017633425194303606,
+ "loss": 1.1164,
+ "step": 670
+ },
+ {
+ "epoch": 0.6711887718420806,
+ "grad_norm": 0.5539883375167847,
+ "learning_rate": 0.00017626649397118734,
+ "loss": 1.453,
+ "step": 671
+ },
+ {
+ "epoch": 0.6721890531712044,
+ "grad_norm": 0.5188294649124146,
+ "learning_rate": 0.00017619865219625452,
+ "loss": 1.5201,
+ "step": 672
+ },
+ {
+ "epoch": 0.6731893345003283,
+ "grad_norm": 0.531973659992218,
+ "learning_rate": 0.00017613072669278343,
+ "loss": 1.3176,
+ "step": 673
+ },
+ {
+ "epoch": 0.674189615829452,
+ "grad_norm": 0.5878707766532898,
+ "learning_rate": 0.00017606271753541192,
+ "loss": 1.5326,
+ "step": 674
+ },
+ {
+ "epoch": 0.6751898971585758,
+ "grad_norm": 0.595443844795227,
+ "learning_rate": 0.00017599462479886974,
+ "loss": 1.4033,
+ "step": 675
+ },
+ {
+ "epoch": 0.6761901784876997,
+ "grad_norm": 0.5093846321105957,
+ "learning_rate": 0.00017592644855797854,
+ "loss": 1.2995,
+ "step": 676
+ },
+ {
+ "epoch": 0.6771904598168235,
+ "grad_norm": 0.5521978735923767,
+ "learning_rate": 0.00017585818888765168,
+ "loss": 1.2912,
+ "step": 677
+ },
+ {
+ "epoch": 0.6781907411459474,
+ "grad_norm": 0.4612530469894409,
+ "learning_rate": 0.0001757898458628941,
+ "loss": 1.1902,
+ "step": 678
+ },
+ {
+ "epoch": 0.6791910224750711,
+ "grad_norm": 0.4973600506782532,
+ "learning_rate": 0.00017572141955880252,
+ "loss": 1.3547,
+ "step": 679
+ },
+ {
+ "epoch": 0.6801913038041949,
+ "grad_norm": 0.606407105922699,
+ "learning_rate": 0.00017565291005056504,
+ "loss": 1.371,
+ "step": 680
+ },
+ {
+ "epoch": 0.6811915851333188,
+ "grad_norm": 0.5027814507484436,
+ "learning_rate": 0.00017558431741346122,
+ "loss": 1.4551,
+ "step": 681
+ },
+ {
+ "epoch": 0.6821918664624426,
+ "grad_norm": 0.5732039213180542,
+ "learning_rate": 0.00017551564172286197,
+ "loss": 1.4181,
+ "step": 682
+ },
+ {
+ "epoch": 0.6831921477915663,
+ "grad_norm": 0.6327995657920837,
+ "learning_rate": 0.00017544688305422943,
+ "loss": 1.237,
+ "step": 683
+ },
+ {
+ "epoch": 0.6841924291206902,
+ "grad_norm": 0.5779625177383423,
+ "learning_rate": 0.00017537804148311695,
+ "loss": 1.5356,
+ "step": 684
+ },
+ {
+ "epoch": 0.685192710449814,
+ "grad_norm": 0.6031951308250427,
+ "learning_rate": 0.00017530911708516902,
+ "loss": 1.3776,
+ "step": 685
+ },
+ {
+ "epoch": 0.6861929917789378,
+ "grad_norm": 0.4811258018016815,
+ "learning_rate": 0.00017524010993612098,
+ "loss": 1.185,
+ "step": 686
+ },
+ {
+ "epoch": 0.6871932731080617,
+ "grad_norm": 0.5048002600669861,
+ "learning_rate": 0.00017517102011179933,
+ "loss": 1.3335,
+ "step": 687
+ },
+ {
+ "epoch": 0.6881935544371854,
+ "grad_norm": 0.5963343977928162,
+ "learning_rate": 0.0001751018476881212,
+ "loss": 1.4326,
+ "step": 688
+ },
+ {
+ "epoch": 0.6891938357663093,
+ "grad_norm": 0.4770168960094452,
+ "learning_rate": 0.00017503259274109464,
+ "loss": 1.4664,
+ "step": 689
+ },
+ {
+ "epoch": 0.6901941170954331,
+ "grad_norm": 0.5020537376403809,
+ "learning_rate": 0.00017496325534681825,
+ "loss": 1.349,
+ "step": 690
+ },
+ {
+ "epoch": 0.6911943984245569,
+ "grad_norm": 0.5567785501480103,
+ "learning_rate": 0.00017489383558148136,
+ "loss": 1.452,
+ "step": 691
+ },
+ {
+ "epoch": 0.6921946797536808,
+ "grad_norm": 0.5167350769042969,
+ "learning_rate": 0.00017482433352136365,
+ "loss": 1.1148,
+ "step": 692
+ },
+ {
+ "epoch": 0.6931949610828045,
+ "grad_norm": 0.6030716300010681,
+ "learning_rate": 0.00017475474924283536,
+ "loss": 1.3473,
+ "step": 693
+ },
+ {
+ "epoch": 0.6941952424119283,
+ "grad_norm": 0.5643062591552734,
+ "learning_rate": 0.00017468508282235704,
+ "loss": 1.3476,
+ "step": 694
+ },
+ {
+ "epoch": 0.6951955237410522,
+ "grad_norm": 0.5124102234840393,
+ "learning_rate": 0.00017461533433647946,
+ "loss": 1.339,
+ "step": 695
+ },
+ {
+ "epoch": 0.696195805070176,
+ "grad_norm": 0.5690215229988098,
+ "learning_rate": 0.00017454550386184362,
+ "loss": 1.3816,
+ "step": 696
+ },
+ {
+ "epoch": 0.6971960863992998,
+ "grad_norm": 0.5938367247581482,
+ "learning_rate": 0.00017447559147518055,
+ "loss": 1.4554,
+ "step": 697
+ },
+ {
+ "epoch": 0.6981963677284236,
+ "grad_norm": 0.5288996696472168,
+ "learning_rate": 0.00017440559725331135,
+ "loss": 1.2904,
+ "step": 698
+ },
+ {
+ "epoch": 0.6991966490575474,
+ "grad_norm": 0.5047140121459961,
+ "learning_rate": 0.000174335521273147,
+ "loss": 1.2362,
+ "step": 699
+ },
+ {
+ "epoch": 0.7001969303866713,
+ "grad_norm": 0.5563321709632874,
+ "learning_rate": 0.00017426536361168834,
+ "loss": 1.2863,
+ "step": 700
+ },
+ {
+ "epoch": 0.7011972117157951,
+ "grad_norm": 0.48857688903808594,
+ "learning_rate": 0.00017419512434602594,
+ "loss": 1.3387,
+ "step": 701
+ },
+ {
+ "epoch": 0.7021974930449189,
+ "grad_norm": 0.5205016732215881,
+ "learning_rate": 0.00017412480355334005,
+ "loss": 1.3874,
+ "step": 702
+ },
+ {
+ "epoch": 0.7031977743740427,
+ "grad_norm": 0.5850381851196289,
+ "learning_rate": 0.00017405440131090048,
+ "loss": 1.5369,
+ "step": 703
+ },
+ {
+ "epoch": 0.7041980557031665,
+ "grad_norm": 0.5708681344985962,
+ "learning_rate": 0.00017398391769606658,
+ "loss": 1.3622,
+ "step": 704
+ },
+ {
+ "epoch": 0.7051983370322903,
+ "grad_norm": 0.5743641257286072,
+ "learning_rate": 0.00017391335278628712,
+ "loss": 1.2946,
+ "step": 705
+ },
+ {
+ "epoch": 0.7061986183614142,
+ "grad_norm": 0.5376024842262268,
+ "learning_rate": 0.00017384270665910014,
+ "loss": 1.2952,
+ "step": 706
+ },
+ {
+ "epoch": 0.707198899690538,
+ "grad_norm": 0.6123641133308411,
+ "learning_rate": 0.000173771979392133,
+ "loss": 1.4239,
+ "step": 707
+ },
+ {
+ "epoch": 0.7081991810196617,
+ "grad_norm": 0.5639240741729736,
+ "learning_rate": 0.00017370117106310214,
+ "loss": 1.3627,
+ "step": 708
+ },
+ {
+ "epoch": 0.7091994623487856,
+ "grad_norm": 0.5551653504371643,
+ "learning_rate": 0.0001736302817498131,
+ "loss": 1.3435,
+ "step": 709
+ },
+ {
+ "epoch": 0.7101997436779094,
+ "grad_norm": 0.4746958911418915,
+ "learning_rate": 0.00017355931153016044,
+ "loss": 1.2402,
+ "step": 710
+ },
+ {
+ "epoch": 0.7112000250070333,
+ "grad_norm": 0.4722553491592407,
+ "learning_rate": 0.0001734882604821276,
+ "loss": 1.3962,
+ "step": 711
+ },
+ {
+ "epoch": 0.712200306336157,
+ "grad_norm": 0.5038101077079773,
+ "learning_rate": 0.0001734171286837868,
+ "loss": 1.3261,
+ "step": 712
+ },
+ {
+ "epoch": 0.7132005876652808,
+ "grad_norm": 0.5004639625549316,
+ "learning_rate": 0.00017334591621329906,
+ "loss": 1.4943,
+ "step": 713
+ },
+ {
+ "epoch": 0.7142008689944047,
+ "grad_norm": 0.5141516327857971,
+ "learning_rate": 0.00017327462314891402,
+ "loss": 1.2754,
+ "step": 714
+ },
+ {
+ "epoch": 0.7152011503235285,
+ "grad_norm": 0.5491873025894165,
+ "learning_rate": 0.00017320324956896977,
+ "loss": 1.3052,
+ "step": 715
+ },
+ {
+ "epoch": 0.7162014316526523,
+ "grad_norm": 0.49937358498573303,
+ "learning_rate": 0.00017313179555189306,
+ "loss": 1.2277,
+ "step": 716
+ },
+ {
+ "epoch": 0.7172017129817762,
+ "grad_norm": 0.6419594287872314,
+ "learning_rate": 0.00017306026117619889,
+ "loss": 1.4844,
+ "step": 717
+ },
+ {
+ "epoch": 0.7182019943108999,
+ "grad_norm": 0.521108090877533,
+ "learning_rate": 0.0001729886465204906,
+ "loss": 1.2917,
+ "step": 718
+ },
+ {
+ "epoch": 0.7192022756400237,
+ "grad_norm": 0.532421886920929,
+ "learning_rate": 0.0001729169516634598,
+ "loss": 1.4555,
+ "step": 719
+ },
+ {
+ "epoch": 0.7202025569691476,
+ "grad_norm": 0.5168073177337646,
+ "learning_rate": 0.0001728451766838861,
+ "loss": 1.2116,
+ "step": 720
+ },
+ {
+ "epoch": 0.7212028382982714,
+ "grad_norm": 0.5593972206115723,
+ "learning_rate": 0.00017277332166063726,
+ "loss": 1.4345,
+ "step": 721
+ },
+ {
+ "epoch": 0.7222031196273953,
+ "grad_norm": 0.5317432284355164,
+ "learning_rate": 0.00017270138667266894,
+ "loss": 1.2987,
+ "step": 722
+ },
+ {
+ "epoch": 0.723203400956519,
+ "grad_norm": 0.6262248158454895,
+ "learning_rate": 0.00017262937179902472,
+ "loss": 1.2591,
+ "step": 723
+ },
+ {
+ "epoch": 0.7242036822856428,
+ "grad_norm": 0.5377100110054016,
+ "learning_rate": 0.00017255727711883588,
+ "loss": 1.366,
+ "step": 724
+ },
+ {
+ "epoch": 0.7252039636147667,
+ "grad_norm": 0.5637168288230896,
+ "learning_rate": 0.00017248510271132144,
+ "loss": 1.4593,
+ "step": 725
+ },
+ {
+ "epoch": 0.7262042449438905,
+ "grad_norm": 0.5360320210456848,
+ "learning_rate": 0.00017241284865578802,
+ "loss": 1.4797,
+ "step": 726
+ },
+ {
+ "epoch": 0.7272045262730142,
+ "grad_norm": 0.48500168323516846,
+ "learning_rate": 0.00017234051503162978,
+ "loss": 1.3875,
+ "step": 727
+ },
+ {
+ "epoch": 0.7282048076021381,
+ "grad_norm": 0.5666176080703735,
+ "learning_rate": 0.0001722681019183283,
+ "loss": 1.4683,
+ "step": 728
+ },
+ {
+ "epoch": 0.7292050889312619,
+ "grad_norm": 0.5710940361022949,
+ "learning_rate": 0.00017219560939545246,
+ "loss": 1.5538,
+ "step": 729
+ },
+ {
+ "epoch": 0.7302053702603858,
+ "grad_norm": 0.5658044219017029,
+ "learning_rate": 0.00017212303754265843,
+ "loss": 1.248,
+ "step": 730
+ },
+ {
+ "epoch": 0.7312056515895096,
+ "grad_norm": 0.5355331301689148,
+ "learning_rate": 0.0001720503864396896,
+ "loss": 1.259,
+ "step": 731
+ },
+ {
+ "epoch": 0.7322059329186333,
+ "grad_norm": 0.5683363676071167,
+ "learning_rate": 0.00017197765616637636,
+ "loss": 1.4242,
+ "step": 732
+ },
+ {
+ "epoch": 0.7332062142477572,
+ "grad_norm": 0.488972932100296,
+ "learning_rate": 0.0001719048468026361,
+ "loss": 1.3442,
+ "step": 733
+ },
+ {
+ "epoch": 0.734206495576881,
+ "grad_norm": 0.45563748478889465,
+ "learning_rate": 0.00017183195842847322,
+ "loss": 1.3236,
+ "step": 734
+ },
+ {
+ "epoch": 0.7352067769060048,
+ "grad_norm": 0.5114185214042664,
+ "learning_rate": 0.0001717589911239788,
+ "loss": 1.3071,
+ "step": 735
+ },
+ {
+ "epoch": 0.7362070582351287,
+ "grad_norm": 0.558686375617981,
+ "learning_rate": 0.00017168594496933074,
+ "loss": 1.2889,
+ "step": 736
+ },
+ {
+ "epoch": 0.7372073395642524,
+ "grad_norm": 0.49099281430244446,
+ "learning_rate": 0.00017161282004479351,
+ "loss": 1.1701,
+ "step": 737
+ },
+ {
+ "epoch": 0.7382076208933762,
+ "grad_norm": 0.549524188041687,
+ "learning_rate": 0.0001715396164307182,
+ "loss": 1.2853,
+ "step": 738
+ },
+ {
+ "epoch": 0.7392079022225001,
+ "grad_norm": 0.5683863162994385,
+ "learning_rate": 0.0001714663342075424,
+ "loss": 1.4201,
+ "step": 739
+ },
+ {
+ "epoch": 0.7402081835516239,
+ "grad_norm": 0.5957104563713074,
+ "learning_rate": 0.00017139297345578994,
+ "loss": 1.3406,
+ "step": 740
+ },
+ {
+ "epoch": 0.7412084648807478,
+ "grad_norm": 0.4645147919654846,
+ "learning_rate": 0.00017131953425607104,
+ "loss": 1.2344,
+ "step": 741
+ },
+ {
+ "epoch": 0.7422087462098715,
+ "grad_norm": 0.4981783330440521,
+ "learning_rate": 0.00017124601668908212,
+ "loss": 1.422,
+ "step": 742
+ },
+ {
+ "epoch": 0.7432090275389953,
+ "grad_norm": 0.5426530838012695,
+ "learning_rate": 0.00017117242083560568,
+ "loss": 1.4275,
+ "step": 743
+ },
+ {
+ "epoch": 0.7442093088681192,
+ "grad_norm": 0.5585354566574097,
+ "learning_rate": 0.00017109874677651024,
+ "loss": 1.5049,
+ "step": 744
+ },
+ {
+ "epoch": 0.745209590197243,
+ "grad_norm": 0.5639151930809021,
+ "learning_rate": 0.0001710249945927503,
+ "loss": 1.4019,
+ "step": 745
+ },
+ {
+ "epoch": 0.7462098715263668,
+ "grad_norm": 0.8334717750549316,
+ "learning_rate": 0.00017095116436536612,
+ "loss": 1.5607,
+ "step": 746
+ },
+ {
+ "epoch": 0.7472101528554906,
+ "grad_norm": 0.513970673084259,
+ "learning_rate": 0.00017087725617548385,
+ "loss": 1.1967,
+ "step": 747
+ },
+ {
+ "epoch": 0.7482104341846144,
+ "grad_norm": 0.6200702786445618,
+ "learning_rate": 0.00017080327010431513,
+ "loss": 1.2298,
+ "step": 748
+ },
+ {
+ "epoch": 0.7492107155137382,
+ "grad_norm": 0.54522305727005,
+ "learning_rate": 0.00017072920623315734,
+ "loss": 1.3214,
+ "step": 749
+ },
+ {
+ "epoch": 0.7502109968428621,
+ "grad_norm": 0.6682360172271729,
+ "learning_rate": 0.00017065506464339326,
+ "loss": 1.4631,
+ "step": 750
+ },
+ {
+ "epoch": 0.7512112781719859,
+ "grad_norm": 0.5061535239219666,
+ "learning_rate": 0.00017058084541649106,
+ "loss": 1.5062,
+ "step": 751
+ },
+ {
+ "epoch": 0.7522115595011097,
+ "grad_norm": 0.5790627598762512,
+ "learning_rate": 0.00017050654863400429,
+ "loss": 1.1371,
+ "step": 752
+ },
+ {
+ "epoch": 0.7532118408302335,
+ "grad_norm": 0.6058077216148376,
+ "learning_rate": 0.00017043217437757164,
+ "loss": 1.2185,
+ "step": 753
+ },
+ {
+ "epoch": 0.7542121221593573,
+ "grad_norm": 0.5494515895843506,
+ "learning_rate": 0.00017035772272891702,
+ "loss": 1.2468,
+ "step": 754
+ },
+ {
+ "epoch": 0.7552124034884812,
+ "grad_norm": 0.5687912106513977,
+ "learning_rate": 0.00017028319376984928,
+ "loss": 1.5621,
+ "step": 755
+ },
+ {
+ "epoch": 0.756212684817605,
+ "grad_norm": 0.5341185927391052,
+ "learning_rate": 0.00017020858758226229,
+ "loss": 1.3598,
+ "step": 756
+ },
+ {
+ "epoch": 0.7572129661467287,
+ "grad_norm": 0.5373026132583618,
+ "learning_rate": 0.0001701339042481347,
+ "loss": 1.4185,
+ "step": 757
+ },
+ {
+ "epoch": 0.7582132474758526,
+ "grad_norm": 0.46508973836898804,
+ "learning_rate": 0.00017005914384953007,
+ "loss": 1.2962,
+ "step": 758
+ },
+ {
+ "epoch": 0.7592135288049764,
+ "grad_norm": 0.4580937325954437,
+ "learning_rate": 0.00016998430646859654,
+ "loss": 1.0707,
+ "step": 759
+ },
+ {
+ "epoch": 0.7602138101341002,
+ "grad_norm": 0.5277093052864075,
+ "learning_rate": 0.00016990939218756683,
+ "loss": 1.2529,
+ "step": 760
+ },
+ {
+ "epoch": 0.761214091463224,
+ "grad_norm": 0.5356671214103699,
+ "learning_rate": 0.0001698344010887582,
+ "loss": 1.4032,
+ "step": 761
+ },
+ {
+ "epoch": 0.7622143727923478,
+ "grad_norm": 0.6881769299507141,
+ "learning_rate": 0.0001697593332545723,
+ "loss": 1.4885,
+ "step": 762
+ },
+ {
+ "epoch": 0.7632146541214717,
+ "grad_norm": 0.5370383262634277,
+ "learning_rate": 0.0001696841887674951,
+ "loss": 1.3271,
+ "step": 763
+ },
+ {
+ "epoch": 0.7642149354505955,
+ "grad_norm": 0.4792316257953644,
+ "learning_rate": 0.00016960896771009684,
+ "loss": 1.2274,
+ "step": 764
+ },
+ {
+ "epoch": 0.7652152167797193,
+ "grad_norm": 0.5276592373847961,
+ "learning_rate": 0.00016953367016503182,
+ "loss": 1.2399,
+ "step": 765
+ },
+ {
+ "epoch": 0.7662154981088432,
+ "grad_norm": 0.4789050221443176,
+ "learning_rate": 0.00016945829621503838,
+ "loss": 1.4002,
+ "step": 766
+ },
+ {
+ "epoch": 0.7672157794379669,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 0.00016938284594293897,
+ "loss": 1.3897,
+ "step": 767
+ },
+ {
+ "epoch": 0.7682160607670907,
+ "grad_norm": 0.5009675621986389,
+ "learning_rate": 0.00016930731943163972,
+ "loss": 1.3797,
+ "step": 768
+ },
+ {
+ "epoch": 0.7692163420962146,
+ "grad_norm": 0.4863432049751282,
+ "learning_rate": 0.00016923171676413063,
+ "loss": 1.4251,
+ "step": 769
+ },
+ {
+ "epoch": 0.7702166234253384,
+ "grad_norm": 0.5190616846084595,
+ "learning_rate": 0.00016915603802348535,
+ "loss": 1.4265,
+ "step": 770
+ },
+ {
+ "epoch": 0.7712169047544621,
+ "grad_norm": 0.5603469610214233,
+ "learning_rate": 0.00016908028329286112,
+ "loss": 1.2852,
+ "step": 771
+ },
+ {
+ "epoch": 0.772217186083586,
+ "grad_norm": 0.5128753185272217,
+ "learning_rate": 0.0001690044526554987,
+ "loss": 1.3324,
+ "step": 772
+ },
+ {
+ "epoch": 0.7732174674127098,
+ "grad_norm": 0.4992072284221649,
+ "learning_rate": 0.00016892854619472223,
+ "loss": 1.2498,
+ "step": 773
+ },
+ {
+ "epoch": 0.7742177487418337,
+ "grad_norm": 0.6128407716751099,
+ "learning_rate": 0.00016885256399393924,
+ "loss": 1.2967,
+ "step": 774
+ },
+ {
+ "epoch": 0.7752180300709575,
+ "grad_norm": 0.5186858177185059,
+ "learning_rate": 0.00016877650613664034,
+ "loss": 1.2654,
+ "step": 775
+ },
+ {
+ "epoch": 0.7762183114000812,
+ "grad_norm": 0.5207421183586121,
+ "learning_rate": 0.00016870037270639942,
+ "loss": 1.2994,
+ "step": 776
+ },
+ {
+ "epoch": 0.7772185927292051,
+ "grad_norm": 0.509912371635437,
+ "learning_rate": 0.0001686241637868734,
+ "loss": 1.3971,
+ "step": 777
+ },
+ {
+ "epoch": 0.7782188740583289,
+ "grad_norm": 0.47703370451927185,
+ "learning_rate": 0.00016854787946180198,
+ "loss": 1.282,
+ "step": 778
+ },
+ {
+ "epoch": 0.7792191553874527,
+ "grad_norm": 0.5404442548751831,
+ "learning_rate": 0.00016847151981500789,
+ "loss": 1.1986,
+ "step": 779
+ },
+ {
+ "epoch": 0.7802194367165766,
+ "grad_norm": 0.541050136089325,
+ "learning_rate": 0.00016839508493039657,
+ "loss": 1.4478,
+ "step": 780
+ },
+ {
+ "epoch": 0.7812197180457003,
+ "grad_norm": 0.46520569920539856,
+ "learning_rate": 0.00016831857489195618,
+ "loss": 1.2385,
+ "step": 781
+ },
+ {
+ "epoch": 0.7822199993748241,
+ "grad_norm": 0.5150445699691772,
+ "learning_rate": 0.00016824198978375736,
+ "loss": 1.3695,
+ "step": 782
+ },
+ {
+ "epoch": 0.783220280703948,
+ "grad_norm": 0.5754334926605225,
+ "learning_rate": 0.00016816532968995328,
+ "loss": 1.3026,
+ "step": 783
+ },
+ {
+ "epoch": 0.7842205620330718,
+ "grad_norm": 0.5335776209831238,
+ "learning_rate": 0.0001680885946947796,
+ "loss": 1.3391,
+ "step": 784
+ },
+ {
+ "epoch": 0.7852208433621957,
+ "grad_norm": 0.6596659421920776,
+ "learning_rate": 0.00016801178488255413,
+ "loss": 1.3224,
+ "step": 785
+ },
+ {
+ "epoch": 0.7862211246913194,
+ "grad_norm": 0.5251991748809814,
+ "learning_rate": 0.00016793490033767698,
+ "loss": 1.1744,
+ "step": 786
+ },
+ {
+ "epoch": 0.7872214060204432,
+ "grad_norm": 0.5112204551696777,
+ "learning_rate": 0.00016785794114463037,
+ "loss": 1.2455,
+ "step": 787
+ },
+ {
+ "epoch": 0.7882216873495671,
+ "grad_norm": 0.532893717288971,
+ "learning_rate": 0.00016778090738797853,
+ "loss": 1.2437,
+ "step": 788
+ },
+ {
+ "epoch": 0.7892219686786909,
+ "grad_norm": 0.5534240007400513,
+ "learning_rate": 0.00016770379915236766,
+ "loss": 1.396,
+ "step": 789
+ },
+ {
+ "epoch": 0.7902222500078147,
+ "grad_norm": 0.5164292454719543,
+ "learning_rate": 0.00016762661652252567,
+ "loss": 1.3138,
+ "step": 790
+ },
+ {
+ "epoch": 0.7912225313369385,
+ "grad_norm": 0.5660764575004578,
+ "learning_rate": 0.00016754935958326244,
+ "loss": 1.3014,
+ "step": 791
+ },
+ {
+ "epoch": 0.7922228126660623,
+ "grad_norm": 0.5137651562690735,
+ "learning_rate": 0.00016747202841946928,
+ "loss": 1.2834,
+ "step": 792
+ },
+ {
+ "epoch": 0.7932230939951862,
+ "grad_norm": 0.5546874403953552,
+ "learning_rate": 0.00016739462311611919,
+ "loss": 1.2841,
+ "step": 793
+ },
+ {
+ "epoch": 0.79422337532431,
+ "grad_norm": 0.5112007260322571,
+ "learning_rate": 0.00016731714375826657,
+ "loss": 1.1873,
+ "step": 794
+ },
+ {
+ "epoch": 0.7952236566534338,
+ "grad_norm": 0.5462679862976074,
+ "learning_rate": 0.00016723959043104728,
+ "loss": 1.2602,
+ "step": 795
+ },
+ {
+ "epoch": 0.7962239379825576,
+ "grad_norm": 0.5083702802658081,
+ "learning_rate": 0.00016716196321967832,
+ "loss": 1.334,
+ "step": 796
+ },
+ {
+ "epoch": 0.7972242193116814,
+ "grad_norm": 0.5491913557052612,
+ "learning_rate": 0.00016708426220945802,
+ "loss": 1.335,
+ "step": 797
+ },
+ {
+ "epoch": 0.7982245006408052,
+ "grad_norm": 0.5257419943809509,
+ "learning_rate": 0.00016700648748576574,
+ "loss": 1.374,
+ "step": 798
+ },
+ {
+ "epoch": 0.7992247819699291,
+ "grad_norm": 0.5252013206481934,
+ "learning_rate": 0.0001669286391340618,
+ "loss": 1.281,
+ "step": 799
+ },
+ {
+ "epoch": 0.8002250632990529,
+ "grad_norm": 0.5784058570861816,
+ "learning_rate": 0.00016685071723988748,
+ "loss": 1.385,
+ "step": 800
+ },
+ {
+ "epoch": 0.8012253446281766,
+ "grad_norm": 0.5508819818496704,
+ "learning_rate": 0.00016677272188886483,
+ "loss": 1.5138,
+ "step": 801
+ },
+ {
+ "epoch": 0.8022256259573005,
+ "grad_norm": 0.5943104028701782,
+ "learning_rate": 0.00016669465316669667,
+ "loss": 1.2341,
+ "step": 802
+ },
+ {
+ "epoch": 0.8032259072864243,
+ "grad_norm": 0.5109750032424927,
+ "learning_rate": 0.00016661651115916642,
+ "loss": 1.361,
+ "step": 803
+ },
+ {
+ "epoch": 0.8042261886155482,
+ "grad_norm": 0.5322972536087036,
+ "learning_rate": 0.00016653829595213794,
+ "loss": 1.3383,
+ "step": 804
+ },
+ {
+ "epoch": 0.805226469944672,
+ "grad_norm": 0.4870489537715912,
+ "learning_rate": 0.00016646000763155568,
+ "loss": 1.2932,
+ "step": 805
+ },
+ {
+ "epoch": 0.8062267512737957,
+ "grad_norm": 0.6070749163627625,
+ "learning_rate": 0.00016638164628344425,
+ "loss": 1.3517,
+ "step": 806
+ },
+ {
+ "epoch": 0.8072270326029196,
+ "grad_norm": 0.5695485472679138,
+ "learning_rate": 0.00016630321199390867,
+ "loss": 1.295,
+ "step": 807
+ },
+ {
+ "epoch": 0.8082273139320434,
+ "grad_norm": 0.49092933535575867,
+ "learning_rate": 0.00016622470484913406,
+ "loss": 1.1708,
+ "step": 808
+ },
+ {
+ "epoch": 0.8092275952611672,
+ "grad_norm": 0.5488709807395935,
+ "learning_rate": 0.00016614612493538551,
+ "loss": 1.3101,
+ "step": 809
+ },
+ {
+ "epoch": 0.810227876590291,
+ "grad_norm": 0.6875150799751282,
+ "learning_rate": 0.00016606747233900815,
+ "loss": 1.3,
+ "step": 810
+ },
+ {
+ "epoch": 0.8112281579194148,
+ "grad_norm": 0.5599775910377502,
+ "learning_rate": 0.00016598874714642697,
+ "loss": 1.5711,
+ "step": 811
+ },
+ {
+ "epoch": 0.8122284392485386,
+ "grad_norm": 0.7102994322776794,
+ "learning_rate": 0.00016590994944414678,
+ "loss": 1.4553,
+ "step": 812
+ },
+ {
+ "epoch": 0.8132287205776625,
+ "grad_norm": 0.5191233158111572,
+ "learning_rate": 0.00016583107931875192,
+ "loss": 1.4292,
+ "step": 813
+ },
+ {
+ "epoch": 0.8142290019067863,
+ "grad_norm": 0.4739600718021393,
+ "learning_rate": 0.0001657521368569064,
+ "loss": 1.3776,
+ "step": 814
+ },
+ {
+ "epoch": 0.8152292832359102,
+ "grad_norm": 0.5282078981399536,
+ "learning_rate": 0.0001656731221453537,
+ "loss": 1.4359,
+ "step": 815
+ },
+ {
+ "epoch": 0.8162295645650339,
+ "grad_norm": 0.690367579460144,
+ "learning_rate": 0.00016559403527091675,
+ "loss": 1.1747,
+ "step": 816
+ },
+ {
+ "epoch": 0.8172298458941577,
+ "grad_norm": 0.5715120434761047,
+ "learning_rate": 0.0001655148763204977,
+ "loss": 1.3289,
+ "step": 817
+ },
+ {
+ "epoch": 0.8182301272232816,
+ "grad_norm": 0.7024423480033875,
+ "learning_rate": 0.00016543564538107797,
+ "loss": 1.4758,
+ "step": 818
+ },
+ {
+ "epoch": 0.8192304085524054,
+ "grad_norm": 0.5568886399269104,
+ "learning_rate": 0.00016535634253971794,
+ "loss": 1.5172,
+ "step": 819
+ },
+ {
+ "epoch": 0.8202306898815291,
+ "grad_norm": 0.5847441554069519,
+ "learning_rate": 0.00016527696788355714,
+ "loss": 1.1993,
+ "step": 820
+ },
+ {
+ "epoch": 0.821230971210653,
+ "grad_norm": 0.5402149558067322,
+ "learning_rate": 0.00016519752149981397,
+ "loss": 1.2921,
+ "step": 821
+ },
+ {
+ "epoch": 0.8222312525397768,
+ "grad_norm": 0.6050311326980591,
+ "learning_rate": 0.0001651180034757856,
+ "loss": 1.59,
+ "step": 822
+ },
+ {
+ "epoch": 0.8232315338689006,
+ "grad_norm": 0.6215486526489258,
+ "learning_rate": 0.00016503841389884798,
+ "loss": 1.4562,
+ "step": 823
+ },
+ {
+ "epoch": 0.8242318151980245,
+ "grad_norm": 0.6507789492607117,
+ "learning_rate": 0.00016495875285645566,
+ "loss": 1.349,
+ "step": 824
+ },
+ {
+ "epoch": 0.8252320965271482,
+ "grad_norm": 0.5273147225379944,
+ "learning_rate": 0.00016487902043614173,
+ "loss": 1.4016,
+ "step": 825
+ },
+ {
+ "epoch": 0.8262323778562721,
+ "grad_norm": 0.579987645149231,
+ "learning_rate": 0.0001647992167255177,
+ "loss": 1.4077,
+ "step": 826
+ },
+ {
+ "epoch": 0.8272326591853959,
+ "grad_norm": 0.5068405270576477,
+ "learning_rate": 0.0001647193418122734,
+ "loss": 1.5075,
+ "step": 827
+ },
+ {
+ "epoch": 0.8282329405145197,
+ "grad_norm": 0.519982099533081,
+ "learning_rate": 0.00016463939578417692,
+ "loss": 1.2721,
+ "step": 828
+ },
+ {
+ "epoch": 0.8292332218436436,
+ "grad_norm": 0.5181561708450317,
+ "learning_rate": 0.0001645593787290745,
+ "loss": 1.2299,
+ "step": 829
+ },
+ {
+ "epoch": 0.8302335031727673,
+ "grad_norm": 0.47413337230682373,
+ "learning_rate": 0.0001644792907348904,
+ "loss": 1.2462,
+ "step": 830
+ },
+ {
+ "epoch": 0.8312337845018911,
+ "grad_norm": 0.5426570773124695,
+ "learning_rate": 0.00016439913188962685,
+ "loss": 1.4496,
+ "step": 831
+ },
+ {
+ "epoch": 0.832234065831015,
+ "grad_norm": 0.5744379758834839,
+ "learning_rate": 0.0001643189022813639,
+ "loss": 1.3284,
+ "step": 832
+ },
+ {
+ "epoch": 0.8332343471601388,
+ "grad_norm": 0.49693235754966736,
+ "learning_rate": 0.0001642386019982594,
+ "loss": 1.4082,
+ "step": 833
+ },
+ {
+ "epoch": 0.8342346284892626,
+ "grad_norm": 0.5346773862838745,
+ "learning_rate": 0.00016415823112854883,
+ "loss": 1.4238,
+ "step": 834
+ },
+ {
+ "epoch": 0.8352349098183864,
+ "grad_norm": 0.6201802492141724,
+ "learning_rate": 0.00016407778976054526,
+ "loss": 1.3288,
+ "step": 835
+ },
+ {
+ "epoch": 0.8362351911475102,
+ "grad_norm": 0.5161807537078857,
+ "learning_rate": 0.0001639972779826392,
+ "loss": 1.3798,
+ "step": 836
+ },
+ {
+ "epoch": 0.8372354724766341,
+ "grad_norm": 0.4670160412788391,
+ "learning_rate": 0.0001639166958832985,
+ "loss": 1.3765,
+ "step": 837
+ },
+ {
+ "epoch": 0.8382357538057579,
+ "grad_norm": 0.6492543816566467,
+ "learning_rate": 0.00016383604355106837,
+ "loss": 1.5277,
+ "step": 838
+ },
+ {
+ "epoch": 0.8392360351348817,
+ "grad_norm": 0.5766328573226929,
+ "learning_rate": 0.00016375532107457108,
+ "loss": 1.2481,
+ "step": 839
+ },
+ {
+ "epoch": 0.8402363164640055,
+ "grad_norm": 0.6431072950363159,
+ "learning_rate": 0.00016367452854250603,
+ "loss": 1.2755,
+ "step": 840
+ },
+ {
+ "epoch": 0.8412365977931293,
+ "grad_norm": 0.5121828317642212,
+ "learning_rate": 0.00016359366604364972,
+ "loss": 1.2927,
+ "step": 841
+ },
+ {
+ "epoch": 0.8422368791222531,
+ "grad_norm": 0.5222392678260803,
+ "learning_rate": 0.00016351273366685526,
+ "loss": 1.2626,
+ "step": 842
+ },
+ {
+ "epoch": 0.843237160451377,
+ "grad_norm": 0.5536903142929077,
+ "learning_rate": 0.00016343173150105278,
+ "loss": 1.1892,
+ "step": 843
+ },
+ {
+ "epoch": 0.8442374417805008,
+ "grad_norm": 0.5569381713867188,
+ "learning_rate": 0.00016335065963524897,
+ "loss": 1.4263,
+ "step": 844
+ },
+ {
+ "epoch": 0.8452377231096245,
+ "grad_norm": 0.6490715742111206,
+ "learning_rate": 0.0001632695181585272,
+ "loss": 1.452,
+ "step": 845
+ },
+ {
+ "epoch": 0.8462380044387484,
+ "grad_norm": 0.5965350270271301,
+ "learning_rate": 0.00016318830716004722,
+ "loss": 1.4189,
+ "step": 846
+ },
+ {
+ "epoch": 0.8472382857678722,
+ "grad_norm": 0.45904603600502014,
+ "learning_rate": 0.00016310702672904528,
+ "loss": 1.4024,
+ "step": 847
+ },
+ {
+ "epoch": 0.8482385670969961,
+ "grad_norm": 0.4320334494113922,
+ "learning_rate": 0.00016302567695483382,
+ "loss": 1.2105,
+ "step": 848
+ },
+ {
+ "epoch": 0.8492388484261199,
+ "grad_norm": 0.527032196521759,
+ "learning_rate": 0.0001629442579268016,
+ "loss": 1.1996,
+ "step": 849
+ },
+ {
+ "epoch": 0.8502391297552436,
+ "grad_norm": 0.6317036747932434,
+ "learning_rate": 0.00016286276973441333,
+ "loss": 1.4811,
+ "step": 850
+ },
+ {
+ "epoch": 0.8512394110843675,
+ "grad_norm": 0.5726277828216553,
+ "learning_rate": 0.00016278121246720987,
+ "loss": 1.3249,
+ "step": 851
+ },
+ {
+ "epoch": 0.8522396924134913,
+ "grad_norm": 0.4624577462673187,
+ "learning_rate": 0.00016269958621480788,
+ "loss": 1.3291,
+ "step": 852
+ },
+ {
+ "epoch": 0.8532399737426151,
+ "grad_norm": 0.5774461627006531,
+ "learning_rate": 0.0001626178910668998,
+ "loss": 1.2891,
+ "step": 853
+ },
+ {
+ "epoch": 0.854240255071739,
+ "grad_norm": 0.503584086894989,
+ "learning_rate": 0.00016253612711325386,
+ "loss": 1.3048,
+ "step": 854
+ },
+ {
+ "epoch": 0.8552405364008627,
+ "grad_norm": 0.4560583233833313,
+ "learning_rate": 0.0001624542944437139,
+ "loss": 1.2658,
+ "step": 855
+ },
+ {
+ "epoch": 0.8562408177299866,
+ "grad_norm": 0.49611610174179077,
+ "learning_rate": 0.00016237239314819917,
+ "loss": 1.1017,
+ "step": 856
+ },
+ {
+ "epoch": 0.8572410990591104,
+ "grad_norm": 0.5600405931472778,
+ "learning_rate": 0.0001622904233167044,
+ "loss": 1.3274,
+ "step": 857
+ },
+ {
+ "epoch": 0.8582413803882342,
+ "grad_norm": 0.5849353075027466,
+ "learning_rate": 0.0001622083850392996,
+ "loss": 1.274,
+ "step": 858
+ },
+ {
+ "epoch": 0.859241661717358,
+ "grad_norm": 0.5781377553939819,
+ "learning_rate": 0.00016212627840613003,
+ "loss": 1.4157,
+ "step": 859
+ },
+ {
+ "epoch": 0.8602419430464818,
+ "grad_norm": 0.4908173680305481,
+ "learning_rate": 0.000162044103507416,
+ "loss": 1.3,
+ "step": 860
+ },
+ {
+ "epoch": 0.8612422243756056,
+ "grad_norm": 0.5844553112983704,
+ "learning_rate": 0.00016196186043345288,
+ "loss": 1.2325,
+ "step": 861
+ },
+ {
+ "epoch": 0.8622425057047295,
+ "grad_norm": 0.5381117463111877,
+ "learning_rate": 0.00016187954927461093,
+ "loss": 1.41,
+ "step": 862
+ },
+ {
+ "epoch": 0.8632427870338533,
+ "grad_norm": 0.5468165278434753,
+ "learning_rate": 0.00016179717012133521,
+ "loss": 1.4272,
+ "step": 863
+ },
+ {
+ "epoch": 0.864243068362977,
+ "grad_norm": 0.5702970027923584,
+ "learning_rate": 0.00016171472306414554,
+ "loss": 1.3624,
+ "step": 864
+ },
+ {
+ "epoch": 0.8652433496921009,
+ "grad_norm": 0.5430637001991272,
+ "learning_rate": 0.00016163220819363628,
+ "loss": 1.2555,
+ "step": 865
+ },
+ {
+ "epoch": 0.8662436310212247,
+ "grad_norm": 0.5266844034194946,
+ "learning_rate": 0.00016154962560047643,
+ "loss": 1.3743,
+ "step": 866
+ },
+ {
+ "epoch": 0.8672439123503486,
+ "grad_norm": 0.5201333165168762,
+ "learning_rate": 0.00016146697537540924,
+ "loss": 1.3959,
+ "step": 867
+ },
+ {
+ "epoch": 0.8682441936794724,
+ "grad_norm": 0.44362199306488037,
+ "learning_rate": 0.0001613842576092524,
+ "loss": 1.2661,
+ "step": 868
+ },
+ {
+ "epoch": 0.8692444750085961,
+ "grad_norm": 0.5465226769447327,
+ "learning_rate": 0.00016130147239289778,
+ "loss": 1.3688,
+ "step": 869
+ },
+ {
+ "epoch": 0.87024475633772,
+ "grad_norm": 0.5353460907936096,
+ "learning_rate": 0.00016121861981731135,
+ "loss": 1.2327,
+ "step": 870
+ },
+ {
+ "epoch": 0.8712450376668438,
+ "grad_norm": 0.5463739633560181,
+ "learning_rate": 0.00016113569997353312,
+ "loss": 1.2994,
+ "step": 871
+ },
+ {
+ "epoch": 0.8722453189959676,
+ "grad_norm": 0.5219647288322449,
+ "learning_rate": 0.000161052712952677,
+ "loss": 1.3916,
+ "step": 872
+ },
+ {
+ "epoch": 0.8732456003250915,
+ "grad_norm": 0.4675636887550354,
+ "learning_rate": 0.0001609696588459307,
+ "loss": 1.2786,
+ "step": 873
+ },
+ {
+ "epoch": 0.8742458816542152,
+ "grad_norm": 0.48863986134529114,
+ "learning_rate": 0.00016088653774455568,
+ "loss": 1.1762,
+ "step": 874
+ },
+ {
+ "epoch": 0.875246162983339,
+ "grad_norm": 0.48759785294532776,
+ "learning_rate": 0.00016080334973988695,
+ "loss": 1.2107,
+ "step": 875
+ },
+ {
+ "epoch": 0.8762464443124629,
+ "grad_norm": 0.7353807687759399,
+ "learning_rate": 0.00016072009492333318,
+ "loss": 1.4855,
+ "step": 876
+ },
+ {
+ "epoch": 0.8772467256415867,
+ "grad_norm": 0.4878953993320465,
+ "learning_rate": 0.0001606367733863763,
+ "loss": 1.2343,
+ "step": 877
+ },
+ {
+ "epoch": 0.8782470069707106,
+ "grad_norm": 0.4764840304851532,
+ "learning_rate": 0.00016055338522057158,
+ "loss": 1.3159,
+ "step": 878
+ },
+ {
+ "epoch": 0.8792472882998343,
+ "grad_norm": 0.5289160013198853,
+ "learning_rate": 0.00016046993051754756,
+ "loss": 1.3298,
+ "step": 879
+ },
+ {
+ "epoch": 0.8802475696289581,
+ "grad_norm": 0.5421459078788757,
+ "learning_rate": 0.00016038640936900586,
+ "loss": 1.4081,
+ "step": 880
+ },
+ {
+ "epoch": 0.881247850958082,
+ "grad_norm": 0.5096681118011475,
+ "learning_rate": 0.00016030282186672116,
+ "loss": 1.2406,
+ "step": 881
+ },
+ {
+ "epoch": 0.8822481322872058,
+ "grad_norm": 0.5783627033233643,
+ "learning_rate": 0.00016021916810254097,
+ "loss": 1.3505,
+ "step": 882
+ },
+ {
+ "epoch": 0.8832484136163296,
+ "grad_norm": 0.5718142986297607,
+ "learning_rate": 0.00016013544816838565,
+ "loss": 1.4106,
+ "step": 883
+ },
+ {
+ "epoch": 0.8842486949454534,
+ "grad_norm": 0.551607072353363,
+ "learning_rate": 0.00016005166215624827,
+ "loss": 1.3474,
+ "step": 884
+ },
+ {
+ "epoch": 0.8852489762745772,
+ "grad_norm": 0.5464247465133667,
+ "learning_rate": 0.0001599678101581945,
+ "loss": 1.4443,
+ "step": 885
+ },
+ {
+ "epoch": 0.886249257603701,
+ "grad_norm": 0.5075456500053406,
+ "learning_rate": 0.00015988389226636253,
+ "loss": 1.4919,
+ "step": 886
+ },
+ {
+ "epoch": 0.8872495389328249,
+ "grad_norm": 0.48557186126708984,
+ "learning_rate": 0.00015979990857296295,
+ "loss": 1.4225,
+ "step": 887
+ },
+ {
+ "epoch": 0.8882498202619487,
+ "grad_norm": 0.5385611653327942,
+ "learning_rate": 0.00015971585917027862,
+ "loss": 1.2937,
+ "step": 888
+ },
+ {
+ "epoch": 0.8892501015910725,
+ "grad_norm": 0.6477749943733215,
+ "learning_rate": 0.00015963174415066468,
+ "loss": 1.5628,
+ "step": 889
+ },
+ {
+ "epoch": 0.8902503829201963,
+ "grad_norm": 0.6205973029136658,
+ "learning_rate": 0.0001595475636065483,
+ "loss": 1.4902,
+ "step": 890
+ },
+ {
+ "epoch": 0.8912506642493201,
+ "grad_norm": 0.45717301964759827,
+ "learning_rate": 0.00015946331763042867,
+ "loss": 1.1998,
+ "step": 891
+ },
+ {
+ "epoch": 0.892250945578444,
+ "grad_norm": 0.5279855132102966,
+ "learning_rate": 0.00015937900631487686,
+ "loss": 1.0668,
+ "step": 892
+ },
+ {
+ "epoch": 0.8932512269075678,
+ "grad_norm": 0.5207269787788391,
+ "learning_rate": 0.00015929462975253585,
+ "loss": 1.2774,
+ "step": 893
+ },
+ {
+ "epoch": 0.8942515082366915,
+ "grad_norm": 0.5200834274291992,
+ "learning_rate": 0.00015921018803612014,
+ "loss": 1.4316,
+ "step": 894
+ },
+ {
+ "epoch": 0.8952517895658154,
+ "grad_norm": 0.48317649960517883,
+ "learning_rate": 0.0001591256812584159,
+ "loss": 1.4101,
+ "step": 895
+ },
+ {
+ "epoch": 0.8962520708949392,
+ "grad_norm": 0.475483775138855,
+ "learning_rate": 0.00015904110951228082,
+ "loss": 1.2011,
+ "step": 896
+ },
+ {
+ "epoch": 0.897252352224063,
+ "grad_norm": 0.6542660593986511,
+ "learning_rate": 0.00015895647289064396,
+ "loss": 1.56,
+ "step": 897
+ },
+ {
+ "epoch": 0.8982526335531869,
+ "grad_norm": 0.5154829621315002,
+ "learning_rate": 0.00015887177148650564,
+ "loss": 1.3748,
+ "step": 898
+ },
+ {
+ "epoch": 0.8992529148823106,
+ "grad_norm": 0.5744799375534058,
+ "learning_rate": 0.0001587870053929374,
+ "loss": 1.4072,
+ "step": 899
+ },
+ {
+ "epoch": 0.9002531962114345,
+ "grad_norm": 0.4835909307003021,
+ "learning_rate": 0.00015870217470308188,
+ "loss": 1.3037,
+ "step": 900
+ },
+ {
+ "epoch": 0.9012534775405583,
+ "grad_norm": 0.5292366743087769,
+ "learning_rate": 0.0001586172795101526,
+ "loss": 1.2395,
+ "step": 901
+ },
+ {
+ "epoch": 0.9022537588696821,
+ "grad_norm": 0.5905430912971497,
+ "learning_rate": 0.00015853231990743406,
+ "loss": 1.29,
+ "step": 902
+ },
+ {
+ "epoch": 0.903254040198806,
+ "grad_norm": 0.4918007254600525,
+ "learning_rate": 0.0001584472959882815,
+ "loss": 1.2593,
+ "step": 903
+ },
+ {
+ "epoch": 0.9042543215279297,
+ "grad_norm": 0.4735652208328247,
+ "learning_rate": 0.00015836220784612085,
+ "loss": 1.1669,
+ "step": 904
+ },
+ {
+ "epoch": 0.9052546028570535,
+ "grad_norm": 0.6272550821304321,
+ "learning_rate": 0.00015827705557444852,
+ "loss": 1.3692,
+ "step": 905
+ },
+ {
+ "epoch": 0.9062548841861774,
+ "grad_norm": 0.5333564877510071,
+ "learning_rate": 0.00015819183926683153,
+ "loss": 1.3672,
+ "step": 906
+ },
+ {
+ "epoch": 0.9072551655153012,
+ "grad_norm": 0.44029948115348816,
+ "learning_rate": 0.00015810655901690715,
+ "loss": 1.2124,
+ "step": 907
+ },
+ {
+ "epoch": 0.9082554468444249,
+ "grad_norm": 0.5636379718780518,
+ "learning_rate": 0.00015802121491838297,
+ "loss": 1.3507,
+ "step": 908
+ },
+ {
+ "epoch": 0.9092557281735488,
+ "grad_norm": 0.4394778907299042,
+ "learning_rate": 0.0001579358070650367,
+ "loss": 1.3159,
+ "step": 909
+ },
+ {
+ "epoch": 0.9102560095026726,
+ "grad_norm": 0.5382723212242126,
+ "learning_rate": 0.00015785033555071616,
+ "loss": 1.3733,
+ "step": 910
+ },
+ {
+ "epoch": 0.9112562908317965,
+ "grad_norm": 0.5251659750938416,
+ "learning_rate": 0.00015776480046933905,
+ "loss": 1.2253,
+ "step": 911
+ },
+ {
+ "epoch": 0.9122565721609203,
+ "grad_norm": 0.4791383743286133,
+ "learning_rate": 0.000157679201914893,
+ "loss": 1.2341,
+ "step": 912
+ },
+ {
+ "epoch": 0.913256853490044,
+ "grad_norm": 0.5058613419532776,
+ "learning_rate": 0.00015759353998143528,
+ "loss": 1.2717,
+ "step": 913
+ },
+ {
+ "epoch": 0.9142571348191679,
+ "grad_norm": 0.46837320923805237,
+ "learning_rate": 0.00015750781476309288,
+ "loss": 1.2484,
+ "step": 914
+ },
+ {
+ "epoch": 0.9152574161482917,
+ "grad_norm": 0.524444580078125,
+ "learning_rate": 0.00015742202635406235,
+ "loss": 1.5512,
+ "step": 915
+ },
+ {
+ "epoch": 0.9162576974774155,
+ "grad_norm": 0.6169744729995728,
+ "learning_rate": 0.00015733617484860963,
+ "loss": 1.271,
+ "step": 916
+ },
+ {
+ "epoch": 0.9172579788065394,
+ "grad_norm": 0.48883670568466187,
+ "learning_rate": 0.00015725026034106996,
+ "loss": 1.4779,
+ "step": 917
+ },
+ {
+ "epoch": 0.9182582601356631,
+ "grad_norm": 0.5408645272254944,
+ "learning_rate": 0.00015716428292584787,
+ "loss": 1.3574,
+ "step": 918
+ },
+ {
+ "epoch": 0.919258541464787,
+ "grad_norm": 0.5622221231460571,
+ "learning_rate": 0.00015707824269741702,
+ "loss": 1.2146,
+ "step": 919
+ },
+ {
+ "epoch": 0.9202588227939108,
+ "grad_norm": 0.477328896522522,
+ "learning_rate": 0.00015699213975031996,
+ "loss": 1.162,
+ "step": 920
+ },
+ {
+ "epoch": 0.9212591041230346,
+ "grad_norm": 0.503027081489563,
+ "learning_rate": 0.0001569059741791684,
+ "loss": 1.1674,
+ "step": 921
+ },
+ {
+ "epoch": 0.9222593854521585,
+ "grad_norm": 0.5951637625694275,
+ "learning_rate": 0.0001568197460786426,
+ "loss": 1.3737,
+ "step": 922
+ },
+ {
+ "epoch": 0.9232596667812822,
+ "grad_norm": 0.5276626348495483,
+ "learning_rate": 0.0001567334555434917,
+ "loss": 1.2494,
+ "step": 923
+ },
+ {
+ "epoch": 0.924259948110406,
+ "grad_norm": 0.6354761123657227,
+ "learning_rate": 0.0001566471026685334,
+ "loss": 1.2052,
+ "step": 924
+ },
+ {
+ "epoch": 0.9252602294395299,
+ "grad_norm": 0.4227287471294403,
+ "learning_rate": 0.00015656068754865387,
+ "loss": 1.1446,
+ "step": 925
+ },
+ {
+ "epoch": 0.9262605107686537,
+ "grad_norm": 0.5290839076042175,
+ "learning_rate": 0.00015647421027880772,
+ "loss": 1.2057,
+ "step": 926
+ },
+ {
+ "epoch": 0.9272607920977775,
+ "grad_norm": 0.4961225986480713,
+ "learning_rate": 0.0001563876709540178,
+ "loss": 1.2788,
+ "step": 927
+ },
+ {
+ "epoch": 0.9282610734269013,
+ "grad_norm": 0.5095213651657104,
+ "learning_rate": 0.0001563010696693752,
+ "loss": 1.2751,
+ "step": 928
+ },
+ {
+ "epoch": 0.9292613547560251,
+ "grad_norm": 0.5027223825454712,
+ "learning_rate": 0.00015621440652003907,
+ "loss": 1.3653,
+ "step": 929
+ },
+ {
+ "epoch": 0.930261636085149,
+ "grad_norm": 0.49251896142959595,
+ "learning_rate": 0.00015612768160123652,
+ "loss": 1.1556,
+ "step": 930
+ },
+ {
+ "epoch": 0.9312619174142728,
+ "grad_norm": 0.5187139511108398,
+ "learning_rate": 0.00015604089500826257,
+ "loss": 1.3623,
+ "step": 931
+ },
+ {
+ "epoch": 0.9322621987433966,
+ "grad_norm": 0.5004428029060364,
+ "learning_rate": 0.00015595404683648,
+ "loss": 1.185,
+ "step": 932
+ },
+ {
+ "epoch": 0.9332624800725204,
+ "grad_norm": 0.5750531554222107,
+ "learning_rate": 0.00015586713718131922,
+ "loss": 1.2999,
+ "step": 933
+ },
+ {
+ "epoch": 0.9342627614016442,
+ "grad_norm": 0.482732355594635,
+ "learning_rate": 0.0001557801661382782,
+ "loss": 1.2635,
+ "step": 934
+ },
+ {
+ "epoch": 0.935263042730768,
+ "grad_norm": 0.47854143381118774,
+ "learning_rate": 0.00015569313380292248,
+ "loss": 1.2833,
+ "step": 935
+ },
+ {
+ "epoch": 0.9362633240598919,
+ "grad_norm": 0.49382665753364563,
+ "learning_rate": 0.00015560604027088477,
+ "loss": 1.2327,
+ "step": 936
+ },
+ {
+ "epoch": 0.9372636053890157,
+ "grad_norm": 0.5009885430335999,
+ "learning_rate": 0.00015551888563786515,
+ "loss": 1.2967,
+ "step": 937
+ },
+ {
+ "epoch": 0.9382638867181394,
+ "grad_norm": 0.5012707114219666,
+ "learning_rate": 0.00015543166999963076,
+ "loss": 1.3231,
+ "step": 938
+ },
+ {
+ "epoch": 0.9392641680472633,
+ "grad_norm": 0.6908506751060486,
+ "learning_rate": 0.0001553443934520159,
+ "loss": 1.4055,
+ "step": 939
+ },
+ {
+ "epoch": 0.9402644493763871,
+ "grad_norm": 0.7104817032814026,
+ "learning_rate": 0.00015525705609092157,
+ "loss": 1.3435,
+ "step": 940
+ },
+ {
+ "epoch": 0.941264730705511,
+ "grad_norm": 0.49263522028923035,
+ "learning_rate": 0.00015516965801231586,
+ "loss": 1.2259,
+ "step": 941
+ },
+ {
+ "epoch": 0.9422650120346348,
+ "grad_norm": 0.5337693691253662,
+ "learning_rate": 0.0001550821993122334,
+ "loss": 1.2863,
+ "step": 942
+ },
+ {
+ "epoch": 0.9432652933637585,
+ "grad_norm": 0.5506749153137207,
+ "learning_rate": 0.0001549946800867755,
+ "loss": 1.4061,
+ "step": 943
+ },
+ {
+ "epoch": 0.9442655746928824,
+ "grad_norm": 0.5121364593505859,
+ "learning_rate": 0.00015490710043210997,
+ "loss": 1.3567,
+ "step": 944
+ },
+ {
+ "epoch": 0.9452658560220062,
+ "grad_norm": 0.5326678156852722,
+ "learning_rate": 0.00015481946044447099,
+ "loss": 1.2719,
+ "step": 945
+ },
+ {
+ "epoch": 0.94626613735113,
+ "grad_norm": 0.6023722290992737,
+ "learning_rate": 0.00015473176022015906,
+ "loss": 1.1512,
+ "step": 946
+ },
+ {
+ "epoch": 0.9472664186802539,
+ "grad_norm": 0.4953387975692749,
+ "learning_rate": 0.0001546439998555409,
+ "loss": 1.556,
+ "step": 947
+ },
+ {
+ "epoch": 0.9482667000093776,
+ "grad_norm": 0.5187799334526062,
+ "learning_rate": 0.0001545561794470492,
+ "loss": 1.279,
+ "step": 948
+ },
+ {
+ "epoch": 0.9492669813385014,
+ "grad_norm": 0.5788894295692444,
+ "learning_rate": 0.00015446829909118275,
+ "loss": 1.3246,
+ "step": 949
+ },
+ {
+ "epoch": 0.9502672626676253,
+ "grad_norm": 0.5551681518554688,
+ "learning_rate": 0.00015438035888450623,
+ "loss": 1.2231,
+ "step": 950
+ },
+ {
+ "epoch": 0.9512675439967491,
+ "grad_norm": 0.4898390471935272,
+ "learning_rate": 0.00015429235892364994,
+ "loss": 1.2036,
+ "step": 951
+ },
+ {
+ "epoch": 0.952267825325873,
+ "grad_norm": 0.5427507162094116,
+ "learning_rate": 0.00015420429930530996,
+ "loss": 1.3614,
+ "step": 952
+ },
+ {
+ "epoch": 0.9532681066549967,
+ "grad_norm": 0.557054340839386,
+ "learning_rate": 0.00015411618012624786,
+ "loss": 1.4249,
+ "step": 953
+ },
+ {
+ "epoch": 0.9542683879841205,
+ "grad_norm": 0.5793543457984924,
+ "learning_rate": 0.00015402800148329071,
+ "loss": 1.4341,
+ "step": 954
+ },
+ {
+ "epoch": 0.9552686693132444,
+ "grad_norm": 0.5993456244468689,
+ "learning_rate": 0.00015393976347333088,
+ "loss": 1.0259,
+ "step": 955
+ },
+ {
+ "epoch": 0.9562689506423682,
+ "grad_norm": 0.554904580116272,
+ "learning_rate": 0.00015385146619332596,
+ "loss": 1.3558,
+ "step": 956
+ },
+ {
+ "epoch": 0.9572692319714919,
+ "grad_norm": 0.5488478541374207,
+ "learning_rate": 0.00015376310974029873,
+ "loss": 1.358,
+ "step": 957
+ },
+ {
+ "epoch": 0.9582695133006158,
+ "grad_norm": 0.5108879208564758,
+ "learning_rate": 0.00015367469421133695,
+ "loss": 1.3865,
+ "step": 958
+ },
+ {
+ "epoch": 0.9592697946297396,
+ "grad_norm": 0.4606814682483673,
+ "learning_rate": 0.00015358621970359325,
+ "loss": 1.2055,
+ "step": 959
+ },
+ {
+ "epoch": 0.9602700759588634,
+ "grad_norm": 0.4974004328250885,
+ "learning_rate": 0.00015349768631428519,
+ "loss": 1.2541,
+ "step": 960
+ },
+ {
+ "epoch": 0.9612703572879873,
+ "grad_norm": 0.5107241272926331,
+ "learning_rate": 0.00015340909414069488,
+ "loss": 1.1624,
+ "step": 961
+ },
+ {
+ "epoch": 0.962270638617111,
+ "grad_norm": 0.5587212443351746,
+ "learning_rate": 0.00015332044328016914,
+ "loss": 1.349,
+ "step": 962
+ },
+ {
+ "epoch": 0.9632709199462349,
+ "grad_norm": 0.5209497809410095,
+ "learning_rate": 0.0001532317338301192,
+ "loss": 1.3695,
+ "step": 963
+ },
+ {
+ "epoch": 0.9642712012753587,
+ "grad_norm": 0.4985620677471161,
+ "learning_rate": 0.00015314296588802076,
+ "loss": 1.4597,
+ "step": 964
+ },
+ {
+ "epoch": 0.9652714826044825,
+ "grad_norm": 0.5065100789070129,
+ "learning_rate": 0.00015305413955141365,
+ "loss": 1.4225,
+ "step": 965
+ },
+ {
+ "epoch": 0.9662717639336064,
+ "grad_norm": 0.5079792737960815,
+ "learning_rate": 0.00015296525491790205,
+ "loss": 1.057,
+ "step": 966
+ },
+ {
+ "epoch": 0.9672720452627301,
+ "grad_norm": 0.4673600196838379,
+ "learning_rate": 0.00015287631208515406,
+ "loss": 1.2531,
+ "step": 967
+ },
+ {
+ "epoch": 0.9682723265918539,
+ "grad_norm": 0.5309945344924927,
+ "learning_rate": 0.00015278731115090171,
+ "loss": 1.374,
+ "step": 968
+ },
+ {
+ "epoch": 0.9692726079209778,
+ "grad_norm": 0.4792092442512512,
+ "learning_rate": 0.00015269825221294098,
+ "loss": 1.3018,
+ "step": 969
+ },
+ {
+ "epoch": 0.9702728892501016,
+ "grad_norm": 0.5222868323326111,
+ "learning_rate": 0.00015260913536913154,
+ "loss": 1.4063,
+ "step": 970
+ },
+ {
+ "epoch": 0.9712731705792254,
+ "grad_norm": 0.5373417139053345,
+ "learning_rate": 0.00015251996071739664,
+ "loss": 1.2183,
+ "step": 971
+ },
+ {
+ "epoch": 0.9722734519083492,
+ "grad_norm": 0.5624721050262451,
+ "learning_rate": 0.00015243072835572318,
+ "loss": 1.2696,
+ "step": 972
+ },
+ {
+ "epoch": 0.973273733237473,
+ "grad_norm": 0.46938082575798035,
+ "learning_rate": 0.0001523414383821613,
+ "loss": 1.3544,
+ "step": 973
+ },
+ {
+ "epoch": 0.9742740145665969,
+ "grad_norm": 0.45348694920539856,
+ "learning_rate": 0.00015225209089482462,
+ "loss": 1.2078,
+ "step": 974
+ },
+ {
+ "epoch": 0.9752742958957207,
+ "grad_norm": 0.48000606894493103,
+ "learning_rate": 0.0001521626859918898,
+ "loss": 1.1914,
+ "step": 975
+ },
+ {
+ "epoch": 0.9762745772248445,
+ "grad_norm": 0.5106796622276306,
+ "learning_rate": 0.00015207322377159668,
+ "loss": 1.3249,
+ "step": 976
+ },
+ {
+ "epoch": 0.9772748585539683,
+ "grad_norm": 0.49865373969078064,
+ "learning_rate": 0.00015198370433224805,
+ "loss": 1.2876,
+ "step": 977
+ },
+ {
+ "epoch": 0.9782751398830921,
+ "grad_norm": 0.5271755456924438,
+ "learning_rate": 0.00015189412777220958,
+ "loss": 1.3049,
+ "step": 978
+ },
+ {
+ "epoch": 0.9792754212122159,
+ "grad_norm": 0.49824708700180054,
+ "learning_rate": 0.00015180449418990976,
+ "loss": 1.1614,
+ "step": 979
+ },
+ {
+ "epoch": 0.9802757025413398,
+ "grad_norm": 0.7327549457550049,
+ "learning_rate": 0.00015171480368383964,
+ "loss": 1.2923,
+ "step": 980
+ },
+ {
+ "epoch": 0.9812759838704636,
+ "grad_norm": 0.5170425176620483,
+ "learning_rate": 0.00015162505635255287,
+ "loss": 1.3097,
+ "step": 981
+ },
+ {
+ "epoch": 0.9822762651995874,
+ "grad_norm": 0.47041526436805725,
+ "learning_rate": 0.00015153525229466555,
+ "loss": 1.3508,
+ "step": 982
+ },
+ {
+ "epoch": 0.9832765465287112,
+ "grad_norm": 0.4670693278312683,
+ "learning_rate": 0.00015144539160885613,
+ "loss": 1.3974,
+ "step": 983
+ },
+ {
+ "epoch": 0.984276827857835,
+ "grad_norm": 0.5745754837989807,
+ "learning_rate": 0.00015135547439386516,
+ "loss": 1.2977,
+ "step": 984
+ },
+ {
+ "epoch": 0.9852771091869589,
+ "grad_norm": 0.5845474004745483,
+ "learning_rate": 0.0001512655007484955,
+ "loss": 1.3384,
+ "step": 985
+ },
+ {
+ "epoch": 0.9862773905160827,
+ "grad_norm": 0.5627439618110657,
+ "learning_rate": 0.00015117547077161185,
+ "loss": 1.1756,
+ "step": 986
+ },
+ {
+ "epoch": 0.9872776718452064,
+ "grad_norm": 0.6411226987838745,
+ "learning_rate": 0.0001510853845621409,
+ "loss": 1.3441,
+ "step": 987
+ },
+ {
+ "epoch": 0.9882779531743303,
+ "grad_norm": 0.545659601688385,
+ "learning_rate": 0.00015099524221907107,
+ "loss": 1.3766,
+ "step": 988
+ },
+ {
+ "epoch": 0.9892782345034541,
+ "grad_norm": 0.5058498382568359,
+ "learning_rate": 0.0001509050438414525,
+ "loss": 1.3171,
+ "step": 989
+ },
+ {
+ "epoch": 0.9902785158325779,
+ "grad_norm": 0.6247567534446716,
+ "learning_rate": 0.00015081478952839693,
+ "loss": 1.2141,
+ "step": 990
+ },
+ {
+ "epoch": 0.9912787971617018,
+ "grad_norm": 0.5492308139801025,
+ "learning_rate": 0.00015072447937907753,
+ "loss": 1.1626,
+ "step": 991
+ },
+ {
+ "epoch": 0.9922790784908255,
+ "grad_norm": 0.4795534908771515,
+ "learning_rate": 0.00015063411349272877,
+ "loss": 1.218,
+ "step": 992
+ },
+ {
+ "epoch": 0.9932793598199494,
+ "grad_norm": 0.5527793169021606,
+ "learning_rate": 0.00015054369196864644,
+ "loss": 1.3816,
+ "step": 993
+ },
+ {
+ "epoch": 0.9942796411490732,
+ "grad_norm": 0.5297475457191467,
+ "learning_rate": 0.00015045321490618748,
+ "loss": 1.2515,
+ "step": 994
+ },
+ {
+ "epoch": 0.995279922478197,
+ "grad_norm": 0.518803596496582,
+ "learning_rate": 0.00015036268240476978,
+ "loss": 1.3631,
+ "step": 995
+ },
+ {
+ "epoch": 0.9962802038073209,
+ "grad_norm": 0.47196391224861145,
+ "learning_rate": 0.00015027209456387218,
+ "loss": 1.0932,
+ "step": 996
+ },
+ {
+ "epoch": 0.9972804851364446,
+ "grad_norm": 0.5369086861610413,
+ "learning_rate": 0.00015018145148303438,
+ "loss": 1.1181,
+ "step": 997
+ },
+ {
+ "epoch": 0.9982807664655684,
+ "grad_norm": 0.5940788388252258,
+ "learning_rate": 0.00015009075326185667,
+ "loss": 1.561,
+ "step": 998
+ },
+ {
+ "epoch": 0.9992810477946923,
+ "grad_norm": 0.5340734124183655,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 1.2909,
+ "step": 999
+ },
+ {
+ "epoch": 1.0002813291238162,
+ "grad_norm": 0.5133704543113708,
+ "learning_rate": 0.00014990919179718584,
+ "loss": 1.0441,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0012816104529398,
+ "grad_norm": 0.3812060058116913,
+ "learning_rate": 0.00014981832875319597,
+ "loss": 0.8215,
+ "step": 1001
+ },
+ {
+ "epoch": 1.0022818917820637,
+ "grad_norm": 0.40786364674568176,
+ "learning_rate": 0.00014972741096787242,
+ "loss": 0.8215,
+ "step": 1002
+ },
+ {
+ "epoch": 1.0032821731111876,
+ "grad_norm": 0.4328629672527313,
+ "learning_rate": 0.0001496364385411174,
+ "loss": 0.9506,
+ "step": 1003
+ },
+ {
+ "epoch": 1.0042824544403113,
+ "grad_norm": 0.4680945873260498,
+ "learning_rate": 0.0001495454115728932,
+ "loss": 0.8443,
+ "step": 1004
+ },
+ {
+ "epoch": 1.0052827357694352,
+ "grad_norm": 0.48512670397758484,
+ "learning_rate": 0.0001494543301632219,
+ "loss": 1.1143,
+ "step": 1005
+ },
+ {
+ "epoch": 1.006283017098559,
+ "grad_norm": 0.43949049711227417,
+ "learning_rate": 0.00014936319441218555,
+ "loss": 1.0257,
+ "step": 1006
+ },
+ {
+ "epoch": 1.0072832984276827,
+ "grad_norm": 0.5564325451850891,
+ "learning_rate": 0.0001492720044199259,
+ "loss": 0.967,
+ "step": 1007
+ },
+ {
+ "epoch": 1.0082835797568066,
+ "grad_norm": 0.47199952602386475,
+ "learning_rate": 0.0001491807602866442,
+ "loss": 1.0317,
+ "step": 1008
+ },
+ {
+ "epoch": 1.0092838610859305,
+ "grad_norm": 0.4625256657600403,
+ "learning_rate": 0.00014908946211260123,
+ "loss": 0.894,
+ "step": 1009
+ },
+ {
+ "epoch": 1.0102841424150542,
+ "grad_norm": 0.5081682801246643,
+ "learning_rate": 0.00014899810999811726,
+ "loss": 0.9647,
+ "step": 1010
+ },
+ {
+ "epoch": 1.011284423744178,
+ "grad_norm": 0.5240431427955627,
+ "learning_rate": 0.0001489067040435717,
+ "loss": 1.1076,
+ "step": 1011
+ },
+ {
+ "epoch": 1.012284705073302,
+ "grad_norm": 0.5996805429458618,
+ "learning_rate": 0.00014881524434940313,
+ "loss": 0.9063,
+ "step": 1012
+ },
+ {
+ "epoch": 1.0132849864024256,
+ "grad_norm": 0.4602286219596863,
+ "learning_rate": 0.0001487237310161093,
+ "loss": 0.8003,
+ "step": 1013
+ },
+ {
+ "epoch": 1.0142852677315495,
+ "grad_norm": 0.5298121571540833,
+ "learning_rate": 0.0001486321641442467,
+ "loss": 0.9616,
+ "step": 1014
+ },
+ {
+ "epoch": 1.0152855490606734,
+ "grad_norm": 0.47525477409362793,
+ "learning_rate": 0.00014854054383443081,
+ "loss": 1.0457,
+ "step": 1015
+ },
+ {
+ "epoch": 1.016285830389797,
+ "grad_norm": 0.5577285885810852,
+ "learning_rate": 0.00014844887018733582,
+ "loss": 0.8973,
+ "step": 1016
+ },
+ {
+ "epoch": 1.017286111718921,
+ "grad_norm": 0.5028079748153687,
+ "learning_rate": 0.00014835714330369446,
+ "loss": 1.0721,
+ "step": 1017
+ },
+ {
+ "epoch": 1.0182863930480448,
+ "grad_norm": 0.5401796102523804,
+ "learning_rate": 0.00014826536328429795,
+ "loss": 0.9595,
+ "step": 1018
+ },
+ {
+ "epoch": 1.0192866743771685,
+ "grad_norm": 0.4957962930202484,
+ "learning_rate": 0.000148173530229996,
+ "loss": 0.9871,
+ "step": 1019
+ },
+ {
+ "epoch": 1.0202869557062924,
+ "grad_norm": 0.4891825020313263,
+ "learning_rate": 0.00014808164424169647,
+ "loss": 0.9546,
+ "step": 1020
+ },
+ {
+ "epoch": 1.0212872370354162,
+ "grad_norm": 0.48703211545944214,
+ "learning_rate": 0.0001479897054203655,
+ "loss": 0.8863,
+ "step": 1021
+ },
+ {
+ "epoch": 1.0222875183645401,
+ "grad_norm": 0.5614656805992126,
+ "learning_rate": 0.00014789771386702717,
+ "loss": 0.9857,
+ "step": 1022
+ },
+ {
+ "epoch": 1.0232877996936638,
+ "grad_norm": 0.5903550982475281,
+ "learning_rate": 0.0001478056696827636,
+ "loss": 0.8347,
+ "step": 1023
+ },
+ {
+ "epoch": 1.0242880810227877,
+ "grad_norm": 0.47974926233291626,
+ "learning_rate": 0.0001477135729687147,
+ "loss": 1.0035,
+ "step": 1024
+ },
+ {
+ "epoch": 1.0252883623519116,
+ "grad_norm": 0.5049344897270203,
+ "learning_rate": 0.0001476214238260781,
+ "loss": 0.953,
+ "step": 1025
+ },
+ {
+ "epoch": 1.0262886436810352,
+ "grad_norm": 0.3981640636920929,
+ "learning_rate": 0.000147529222356109,
+ "loss": 0.7118,
+ "step": 1026
+ },
+ {
+ "epoch": 1.0272889250101591,
+ "grad_norm": 0.598785400390625,
+ "learning_rate": 0.0001474369686601202,
+ "loss": 0.9002,
+ "step": 1027
+ },
+ {
+ "epoch": 1.028289206339283,
+ "grad_norm": 0.5422918200492859,
+ "learning_rate": 0.0001473446628394818,
+ "loss": 1.192,
+ "step": 1028
+ },
+ {
+ "epoch": 1.0292894876684067,
+ "grad_norm": 0.592509925365448,
+ "learning_rate": 0.00014725230499562119,
+ "loss": 1.0989,
+ "step": 1029
+ },
+ {
+ "epoch": 1.0302897689975306,
+ "grad_norm": 0.5232793688774109,
+ "learning_rate": 0.00014715989523002296,
+ "loss": 1.0667,
+ "step": 1030
+ },
+ {
+ "epoch": 1.0312900503266544,
+ "grad_norm": 0.5362406373023987,
+ "learning_rate": 0.00014706743364422878,
+ "loss": 0.8933,
+ "step": 1031
+ },
+ {
+ "epoch": 1.032290331655778,
+ "grad_norm": 0.43486225605010986,
+ "learning_rate": 0.00014697492033983707,
+ "loss": 0.8525,
+ "step": 1032
+ },
+ {
+ "epoch": 1.033290612984902,
+ "grad_norm": 0.5187330842018127,
+ "learning_rate": 0.00014688235541850337,
+ "loss": 1.017,
+ "step": 1033
+ },
+ {
+ "epoch": 1.0342908943140259,
+ "grad_norm": 0.5081651210784912,
+ "learning_rate": 0.0001467897389819397,
+ "loss": 1.0135,
+ "step": 1034
+ },
+ {
+ "epoch": 1.0352911756431495,
+ "grad_norm": 0.49661391973495483,
+ "learning_rate": 0.00014669707113191483,
+ "loss": 0.8711,
+ "step": 1035
+ },
+ {
+ "epoch": 1.0362914569722734,
+ "grad_norm": 0.4899054169654846,
+ "learning_rate": 0.0001466043519702539,
+ "loss": 0.9924,
+ "step": 1036
+ },
+ {
+ "epoch": 1.0372917383013973,
+ "grad_norm": 0.47787439823150635,
+ "learning_rate": 0.00014651158159883855,
+ "loss": 0.9238,
+ "step": 1037
+ },
+ {
+ "epoch": 1.038292019630521,
+ "grad_norm": 0.509600818157196,
+ "learning_rate": 0.0001464187601196066,
+ "loss": 0.8854,
+ "step": 1038
+ },
+ {
+ "epoch": 1.0392923009596449,
+ "grad_norm": 0.3907245397567749,
+ "learning_rate": 0.00014632588763455212,
+ "loss": 0.8911,
+ "step": 1039
+ },
+ {
+ "epoch": 1.0402925822887688,
+ "grad_norm": 0.4939952492713928,
+ "learning_rate": 0.00014623296424572517,
+ "loss": 0.9069,
+ "step": 1040
+ },
+ {
+ "epoch": 1.0412928636178926,
+ "grad_norm": 0.4680919945240021,
+ "learning_rate": 0.00014613999005523174,
+ "loss": 0.9361,
+ "step": 1041
+ },
+ {
+ "epoch": 1.0422931449470163,
+ "grad_norm": 0.4871543347835541,
+ "learning_rate": 0.00014604696516523361,
+ "loss": 0.9268,
+ "step": 1042
+ },
+ {
+ "epoch": 1.0432934262761402,
+ "grad_norm": 0.5115481615066528,
+ "learning_rate": 0.00014595388967794835,
+ "loss": 0.9555,
+ "step": 1043
+ },
+ {
+ "epoch": 1.044293707605264,
+ "grad_norm": 0.5923699140548706,
+ "learning_rate": 0.00014586076369564908,
+ "loss": 1.0122,
+ "step": 1044
+ },
+ {
+ "epoch": 1.0452939889343877,
+ "grad_norm": 0.491161048412323,
+ "learning_rate": 0.00014576758732066442,
+ "loss": 0.9805,
+ "step": 1045
+ },
+ {
+ "epoch": 1.0462942702635116,
+ "grad_norm": 0.462168425321579,
+ "learning_rate": 0.00014567436065537835,
+ "loss": 0.9213,
+ "step": 1046
+ },
+ {
+ "epoch": 1.0472945515926355,
+ "grad_norm": 0.5082408785820007,
+ "learning_rate": 0.00014558108380223012,
+ "loss": 0.9073,
+ "step": 1047
+ },
+ {
+ "epoch": 1.0482948329217592,
+ "grad_norm": 0.6131752133369446,
+ "learning_rate": 0.00014548775686371412,
+ "loss": 0.9156,
+ "step": 1048
+ },
+ {
+ "epoch": 1.049295114250883,
+ "grad_norm": 0.6133660674095154,
+ "learning_rate": 0.00014539437994237977,
+ "loss": 1.2011,
+ "step": 1049
+ },
+ {
+ "epoch": 1.050295395580007,
+ "grad_norm": 0.542412519454956,
+ "learning_rate": 0.00014530095314083143,
+ "loss": 1.1075,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0512956769091306,
+ "grad_norm": 0.5367622971534729,
+ "learning_rate": 0.00014520747656172824,
+ "loss": 1.0783,
+ "step": 1051
+ },
+ {
+ "epoch": 1.0522959582382545,
+ "grad_norm": 0.5243119597434998,
+ "learning_rate": 0.00014511395030778406,
+ "loss": 1.0865,
+ "step": 1052
+ },
+ {
+ "epoch": 1.0532962395673784,
+ "grad_norm": 0.5611020922660828,
+ "learning_rate": 0.00014502037448176734,
+ "loss": 0.9613,
+ "step": 1053
+ },
+ {
+ "epoch": 1.054296520896502,
+ "grad_norm": 0.506432294845581,
+ "learning_rate": 0.000144926749186501,
+ "loss": 1.1364,
+ "step": 1054
+ },
+ {
+ "epoch": 1.055296802225626,
+ "grad_norm": 0.5270103812217712,
+ "learning_rate": 0.00014483307452486227,
+ "loss": 1.042,
+ "step": 1055
+ },
+ {
+ "epoch": 1.0562970835547498,
+ "grad_norm": 0.5376967191696167,
+ "learning_rate": 0.0001447393505997827,
+ "loss": 0.9563,
+ "step": 1056
+ },
+ {
+ "epoch": 1.0572973648838735,
+ "grad_norm": 0.4821127653121948,
+ "learning_rate": 0.00014464557751424793,
+ "loss": 0.9241,
+ "step": 1057
+ },
+ {
+ "epoch": 1.0582976462129974,
+ "grad_norm": 0.6197866201400757,
+ "learning_rate": 0.00014455175537129758,
+ "loss": 1.0489,
+ "step": 1058
+ },
+ {
+ "epoch": 1.0592979275421213,
+ "grad_norm": 0.42820343375205994,
+ "learning_rate": 0.00014445788427402528,
+ "loss": 0.7755,
+ "step": 1059
+ },
+ {
+ "epoch": 1.0602982088712452,
+ "grad_norm": 0.49635690450668335,
+ "learning_rate": 0.00014436396432557835,
+ "loss": 0.8485,
+ "step": 1060
+ },
+ {
+ "epoch": 1.0612984902003688,
+ "grad_norm": 0.5529823899269104,
+ "learning_rate": 0.00014426999562915782,
+ "loss": 0.9589,
+ "step": 1061
+ },
+ {
+ "epoch": 1.0622987715294927,
+ "grad_norm": 0.5504932403564453,
+ "learning_rate": 0.00014417597828801832,
+ "loss": 0.9048,
+ "step": 1062
+ },
+ {
+ "epoch": 1.0632990528586166,
+ "grad_norm": 0.5755835175514221,
+ "learning_rate": 0.0001440819124054679,
+ "loss": 0.9542,
+ "step": 1063
+ },
+ {
+ "epoch": 1.0642993341877403,
+ "grad_norm": 0.4767759144306183,
+ "learning_rate": 0.00014398779808486793,
+ "loss": 0.9174,
+ "step": 1064
+ },
+ {
+ "epoch": 1.0652996155168641,
+ "grad_norm": 0.5343469381332397,
+ "learning_rate": 0.00014389363542963306,
+ "loss": 0.8493,
+ "step": 1065
+ },
+ {
+ "epoch": 1.066299896845988,
+ "grad_norm": 0.48161643743515015,
+ "learning_rate": 0.000143799424543231,
+ "loss": 0.8218,
+ "step": 1066
+ },
+ {
+ "epoch": 1.0673001781751117,
+ "grad_norm": 0.4958563446998596,
+ "learning_rate": 0.0001437051655291825,
+ "loss": 0.9849,
+ "step": 1067
+ },
+ {
+ "epoch": 1.0683004595042356,
+ "grad_norm": 0.5286628007888794,
+ "learning_rate": 0.0001436108584910611,
+ "loss": 0.8935,
+ "step": 1068
+ },
+ {
+ "epoch": 1.0693007408333595,
+ "grad_norm": 0.6096596121788025,
+ "learning_rate": 0.0001435165035324933,
+ "loss": 1.0577,
+ "step": 1069
+ },
+ {
+ "epoch": 1.0703010221624831,
+ "grad_norm": 0.4895448088645935,
+ "learning_rate": 0.000143422100757158,
+ "loss": 0.865,
+ "step": 1070
+ },
+ {
+ "epoch": 1.071301303491607,
+ "grad_norm": 0.5186201930046082,
+ "learning_rate": 0.00014332765026878687,
+ "loss": 0.8414,
+ "step": 1071
+ },
+ {
+ "epoch": 1.072301584820731,
+ "grad_norm": 0.5639254450798035,
+ "learning_rate": 0.0001432331521711639,
+ "loss": 0.9401,
+ "step": 1072
+ },
+ {
+ "epoch": 1.0733018661498546,
+ "grad_norm": 0.48865774273872375,
+ "learning_rate": 0.00014313860656812536,
+ "loss": 0.7894,
+ "step": 1073
+ },
+ {
+ "epoch": 1.0743021474789785,
+ "grad_norm": 0.4796544313430786,
+ "learning_rate": 0.00014304401356355983,
+ "loss": 0.8153,
+ "step": 1074
+ },
+ {
+ "epoch": 1.0753024288081023,
+ "grad_norm": 0.5578910708427429,
+ "learning_rate": 0.00014294937326140788,
+ "loss": 1.1675,
+ "step": 1075
+ },
+ {
+ "epoch": 1.076302710137226,
+ "grad_norm": 0.5607575178146362,
+ "learning_rate": 0.00014285468576566207,
+ "loss": 0.9133,
+ "step": 1076
+ },
+ {
+ "epoch": 1.07730299146635,
+ "grad_norm": 0.48808708786964417,
+ "learning_rate": 0.00014275995118036693,
+ "loss": 0.8884,
+ "step": 1077
+ },
+ {
+ "epoch": 1.0783032727954738,
+ "grad_norm": 0.4981604814529419,
+ "learning_rate": 0.00014266516960961852,
+ "loss": 0.9235,
+ "step": 1078
+ },
+ {
+ "epoch": 1.0793035541245974,
+ "grad_norm": 0.6323955655097961,
+ "learning_rate": 0.00014257034115756472,
+ "loss": 1.1617,
+ "step": 1079
+ },
+ {
+ "epoch": 1.0803038354537213,
+ "grad_norm": 0.5465244650840759,
+ "learning_rate": 0.0001424754659284048,
+ "loss": 1.0126,
+ "step": 1080
+ },
+ {
+ "epoch": 1.0813041167828452,
+ "grad_norm": 0.504200279712677,
+ "learning_rate": 0.0001423805440263895,
+ "loss": 1.0069,
+ "step": 1081
+ },
+ {
+ "epoch": 1.0823043981119689,
+ "grad_norm": 0.8698700070381165,
+ "learning_rate": 0.0001422855755558208,
+ "loss": 0.9653,
+ "step": 1082
+ },
+ {
+ "epoch": 1.0833046794410928,
+ "grad_norm": 0.41991496086120605,
+ "learning_rate": 0.00014219056062105193,
+ "loss": 1.089,
+ "step": 1083
+ },
+ {
+ "epoch": 1.0843049607702167,
+ "grad_norm": 0.5334717035293579,
+ "learning_rate": 0.0001420954993264871,
+ "loss": 1.0137,
+ "step": 1084
+ },
+ {
+ "epoch": 1.0853052420993405,
+ "grad_norm": 0.5418859124183655,
+ "learning_rate": 0.00014200039177658145,
+ "loss": 0.9302,
+ "step": 1085
+ },
+ {
+ "epoch": 1.0863055234284642,
+ "grad_norm": 0.515819251537323,
+ "learning_rate": 0.000141905238075841,
+ "loss": 1.0703,
+ "step": 1086
+ },
+ {
+ "epoch": 1.087305804757588,
+ "grad_norm": 0.43046239018440247,
+ "learning_rate": 0.00014181003832882248,
+ "loss": 1.0722,
+ "step": 1087
+ },
+ {
+ "epoch": 1.088306086086712,
+ "grad_norm": 0.6555958390235901,
+ "learning_rate": 0.00014171479264013311,
+ "loss": 0.806,
+ "step": 1088
+ },
+ {
+ "epoch": 1.0893063674158356,
+ "grad_norm": 0.5608332753181458,
+ "learning_rate": 0.00014161950111443077,
+ "loss": 0.9925,
+ "step": 1089
+ },
+ {
+ "epoch": 1.0903066487449595,
+ "grad_norm": 0.5866970419883728,
+ "learning_rate": 0.00014152416385642357,
+ "loss": 0.9278,
+ "step": 1090
+ },
+ {
+ "epoch": 1.0913069300740834,
+ "grad_norm": 0.4913788437843323,
+ "learning_rate": 0.00014142878097086995,
+ "loss": 0.7394,
+ "step": 1091
+ },
+ {
+ "epoch": 1.092307211403207,
+ "grad_norm": 0.4942512512207031,
+ "learning_rate": 0.0001413333525625784,
+ "loss": 0.8891,
+ "step": 1092
+ },
+ {
+ "epoch": 1.093307492732331,
+ "grad_norm": 0.5537131428718567,
+ "learning_rate": 0.00014123787873640754,
+ "loss": 0.9632,
+ "step": 1093
+ },
+ {
+ "epoch": 1.0943077740614549,
+ "grad_norm": 0.49271076917648315,
+ "learning_rate": 0.00014114235959726575,
+ "loss": 0.8708,
+ "step": 1094
+ },
+ {
+ "epoch": 1.0953080553905785,
+ "grad_norm": 0.448188841342926,
+ "learning_rate": 0.0001410467952501114,
+ "loss": 0.9727,
+ "step": 1095
+ },
+ {
+ "epoch": 1.0963083367197024,
+ "grad_norm": 0.4975283741950989,
+ "learning_rate": 0.00014095118579995235,
+ "loss": 0.9971,
+ "step": 1096
+ },
+ {
+ "epoch": 1.0973086180488263,
+ "grad_norm": 0.46382221579551697,
+ "learning_rate": 0.0001408555313518461,
+ "loss": 0.8853,
+ "step": 1097
+ },
+ {
+ "epoch": 1.09830889937795,
+ "grad_norm": 0.5071414113044739,
+ "learning_rate": 0.00014075983201089964,
+ "loss": 0.7723,
+ "step": 1098
+ },
+ {
+ "epoch": 1.0993091807070738,
+ "grad_norm": 0.41700050234794617,
+ "learning_rate": 0.0001406640878822692,
+ "loss": 0.7892,
+ "step": 1099
+ },
+ {
+ "epoch": 1.1003094620361977,
+ "grad_norm": 0.497175395488739,
+ "learning_rate": 0.00014056829907116024,
+ "loss": 0.9791,
+ "step": 1100
+ },
+ {
+ "epoch": 1.1013097433653214,
+ "grad_norm": 0.4512806236743927,
+ "learning_rate": 0.00014047246568282736,
+ "loss": 0.9878,
+ "step": 1101
+ },
+ {
+ "epoch": 1.1023100246944453,
+ "grad_norm": 0.5804361701011658,
+ "learning_rate": 0.00014037658782257414,
+ "loss": 1.1583,
+ "step": 1102
+ },
+ {
+ "epoch": 1.1033103060235692,
+ "grad_norm": 0.5334234237670898,
+ "learning_rate": 0.00014028066559575302,
+ "loss": 1.0705,
+ "step": 1103
+ },
+ {
+ "epoch": 1.104310587352693,
+ "grad_norm": 0.4683452844619751,
+ "learning_rate": 0.00014018469910776513,
+ "loss": 0.8608,
+ "step": 1104
+ },
+ {
+ "epoch": 1.1053108686818167,
+ "grad_norm": 0.5595771074295044,
+ "learning_rate": 0.0001400886884640603,
+ "loss": 1.0804,
+ "step": 1105
+ },
+ {
+ "epoch": 1.1063111500109406,
+ "grad_norm": 0.45048126578330994,
+ "learning_rate": 0.00013999263377013693,
+ "loss": 0.7782,
+ "step": 1106
+ },
+ {
+ "epoch": 1.1073114313400645,
+ "grad_norm": 0.4472745954990387,
+ "learning_rate": 0.00013989653513154165,
+ "loss": 0.8599,
+ "step": 1107
+ },
+ {
+ "epoch": 1.1083117126691882,
+ "grad_norm": 0.5168829560279846,
+ "learning_rate": 0.00013980039265386955,
+ "loss": 0.9984,
+ "step": 1108
+ },
+ {
+ "epoch": 1.109311993998312,
+ "grad_norm": 0.5712297558784485,
+ "learning_rate": 0.00013970420644276383,
+ "loss": 0.957,
+ "step": 1109
+ },
+ {
+ "epoch": 1.110312275327436,
+ "grad_norm": 0.5360589027404785,
+ "learning_rate": 0.0001396079766039157,
+ "loss": 1.0957,
+ "step": 1110
+ },
+ {
+ "epoch": 1.1113125566565596,
+ "grad_norm": 0.49815621972084045,
+ "learning_rate": 0.00013951170324306435,
+ "loss": 1.1143,
+ "step": 1111
+ },
+ {
+ "epoch": 1.1123128379856835,
+ "grad_norm": 0.45044735074043274,
+ "learning_rate": 0.00013941538646599687,
+ "loss": 0.8463,
+ "step": 1112
+ },
+ {
+ "epoch": 1.1133131193148074,
+ "grad_norm": 0.5086628198623657,
+ "learning_rate": 0.0001393190263785479,
+ "loss": 0.9061,
+ "step": 1113
+ },
+ {
+ "epoch": 1.114313400643931,
+ "grad_norm": 0.4669632315635681,
+ "learning_rate": 0.0001392226230865998,
+ "loss": 0.7891,
+ "step": 1114
+ },
+ {
+ "epoch": 1.115313681973055,
+ "grad_norm": 0.43681180477142334,
+ "learning_rate": 0.0001391261766960823,
+ "loss": 0.7687,
+ "step": 1115
+ },
+ {
+ "epoch": 1.1163139633021788,
+ "grad_norm": 0.47354501485824585,
+ "learning_rate": 0.00013902968731297255,
+ "loss": 1.0181,
+ "step": 1116
+ },
+ {
+ "epoch": 1.1173142446313025,
+ "grad_norm": 0.5224591493606567,
+ "learning_rate": 0.00013893315504329498,
+ "loss": 0.9072,
+ "step": 1117
+ },
+ {
+ "epoch": 1.1183145259604264,
+ "grad_norm": 0.5648715496063232,
+ "learning_rate": 0.00013883657999312109,
+ "loss": 1.0256,
+ "step": 1118
+ },
+ {
+ "epoch": 1.1193148072895502,
+ "grad_norm": 0.4603082239627838,
+ "learning_rate": 0.00013873996226856933,
+ "loss": 0.9129,
+ "step": 1119
+ },
+ {
+ "epoch": 1.120315088618674,
+ "grad_norm": 0.48259446024894714,
+ "learning_rate": 0.00013864330197580513,
+ "loss": 0.8335,
+ "step": 1120
+ },
+ {
+ "epoch": 1.1213153699477978,
+ "grad_norm": 0.5239295363426208,
+ "learning_rate": 0.0001385465992210407,
+ "loss": 1.1409,
+ "step": 1121
+ },
+ {
+ "epoch": 1.1223156512769217,
+ "grad_norm": 0.5242553949356079,
+ "learning_rate": 0.00013844985411053492,
+ "loss": 0.9542,
+ "step": 1122
+ },
+ {
+ "epoch": 1.1233159326060456,
+ "grad_norm": 0.5396201014518738,
+ "learning_rate": 0.00013835306675059308,
+ "loss": 1.0786,
+ "step": 1123
+ },
+ {
+ "epoch": 1.1243162139351692,
+ "grad_norm": 1.818426251411438,
+ "learning_rate": 0.00013825623724756704,
+ "loss": 0.9336,
+ "step": 1124
+ },
+ {
+ "epoch": 1.1253164952642931,
+ "grad_norm": 0.5364382863044739,
+ "learning_rate": 0.00013815936570785487,
+ "loss": 0.8096,
+ "step": 1125
+ },
+ {
+ "epoch": 1.1263167765934168,
+ "grad_norm": 0.47344619035720825,
+ "learning_rate": 0.00013806245223790088,
+ "loss": 0.8777,
+ "step": 1126
+ },
+ {
+ "epoch": 1.1273170579225407,
+ "grad_norm": 0.48119789361953735,
+ "learning_rate": 0.0001379654969441955,
+ "loss": 0.9965,
+ "step": 1127
+ },
+ {
+ "epoch": 1.1283173392516646,
+ "grad_norm": 0.5970126390457153,
+ "learning_rate": 0.000137868499933275,
+ "loss": 1.1389,
+ "step": 1128
+ },
+ {
+ "epoch": 1.1293176205807884,
+ "grad_norm": 0.5217893719673157,
+ "learning_rate": 0.00013777146131172162,
+ "loss": 1.1361,
+ "step": 1129
+ },
+ {
+ "epoch": 1.130317901909912,
+ "grad_norm": 0.4322263300418854,
+ "learning_rate": 0.00013767438118616318,
+ "loss": 0.8632,
+ "step": 1130
+ },
+ {
+ "epoch": 1.131318183239036,
+ "grad_norm": 0.49836596846580505,
+ "learning_rate": 0.00013757725966327322,
+ "loss": 0.9594,
+ "step": 1131
+ },
+ {
+ "epoch": 1.1323184645681599,
+ "grad_norm": 0.5220472812652588,
+ "learning_rate": 0.00013748009684977073,
+ "loss": 1.0783,
+ "step": 1132
+ },
+ {
+ "epoch": 1.1333187458972835,
+ "grad_norm": 0.5030301809310913,
+ "learning_rate": 0.0001373828928524201,
+ "loss": 0.9482,
+ "step": 1133
+ },
+ {
+ "epoch": 1.1343190272264074,
+ "grad_norm": 0.5477299094200134,
+ "learning_rate": 0.00013728564777803088,
+ "loss": 1.1119,
+ "step": 1134
+ },
+ {
+ "epoch": 1.1353193085555313,
+ "grad_norm": 0.5505563020706177,
+ "learning_rate": 0.00013718836173345783,
+ "loss": 1.0315,
+ "step": 1135
+ },
+ {
+ "epoch": 1.136319589884655,
+ "grad_norm": 0.5921071171760559,
+ "learning_rate": 0.00013709103482560078,
+ "loss": 0.98,
+ "step": 1136
+ },
+ {
+ "epoch": 1.1373198712137789,
+ "grad_norm": 0.4483082890510559,
+ "learning_rate": 0.00013699366716140435,
+ "loss": 0.9203,
+ "step": 1137
+ },
+ {
+ "epoch": 1.1383201525429028,
+ "grad_norm": 0.4304388165473938,
+ "learning_rate": 0.00013689625884785798,
+ "loss": 0.824,
+ "step": 1138
+ },
+ {
+ "epoch": 1.1393204338720264,
+ "grad_norm": 0.5273844003677368,
+ "learning_rate": 0.00013679880999199583,
+ "loss": 1.0061,
+ "step": 1139
+ },
+ {
+ "epoch": 1.1403207152011503,
+ "grad_norm": 0.5016499161720276,
+ "learning_rate": 0.00013670132070089653,
+ "loss": 0.8692,
+ "step": 1140
+ },
+ {
+ "epoch": 1.1413209965302742,
+ "grad_norm": 0.5045731067657471,
+ "learning_rate": 0.00013660379108168324,
+ "loss": 0.958,
+ "step": 1141
+ },
+ {
+ "epoch": 1.142321277859398,
+ "grad_norm": 0.484275221824646,
+ "learning_rate": 0.00013650622124152334,
+ "loss": 0.8589,
+ "step": 1142
+ },
+ {
+ "epoch": 1.1433215591885217,
+ "grad_norm": 0.6210789680480957,
+ "learning_rate": 0.0001364086112876284,
+ "loss": 0.931,
+ "step": 1143
+ },
+ {
+ "epoch": 1.1443218405176456,
+ "grad_norm": 0.59291011095047,
+ "learning_rate": 0.00013631096132725413,
+ "loss": 0.9706,
+ "step": 1144
+ },
+ {
+ "epoch": 1.1453221218467693,
+ "grad_norm": 0.48909759521484375,
+ "learning_rate": 0.00013621327146770025,
+ "loss": 0.9696,
+ "step": 1145
+ },
+ {
+ "epoch": 1.1463224031758932,
+ "grad_norm": 0.5022495985031128,
+ "learning_rate": 0.00013611554181631013,
+ "loss": 0.9349,
+ "step": 1146
+ },
+ {
+ "epoch": 1.147322684505017,
+ "grad_norm": 0.6155623197555542,
+ "learning_rate": 0.00013601777248047105,
+ "loss": 0.9161,
+ "step": 1147
+ },
+ {
+ "epoch": 1.148322965834141,
+ "grad_norm": 0.49372079968452454,
+ "learning_rate": 0.0001359199635676138,
+ "loss": 0.8598,
+ "step": 1148
+ },
+ {
+ "epoch": 1.1493232471632646,
+ "grad_norm": 0.504294753074646,
+ "learning_rate": 0.00013582211518521273,
+ "loss": 0.9334,
+ "step": 1149
+ },
+ {
+ "epoch": 1.1503235284923885,
+ "grad_norm": 0.44594088196754456,
+ "learning_rate": 0.00013572422744078551,
+ "loss": 1.0443,
+ "step": 1150
+ },
+ {
+ "epoch": 1.1513238098215124,
+ "grad_norm": 0.4689579904079437,
+ "learning_rate": 0.00013562630044189304,
+ "loss": 0.9192,
+ "step": 1151
+ },
+ {
+ "epoch": 1.152324091150636,
+ "grad_norm": 0.49370667338371277,
+ "learning_rate": 0.00013552833429613938,
+ "loss": 0.8638,
+ "step": 1152
+ },
+ {
+ "epoch": 1.15332437247976,
+ "grad_norm": 0.4459637403488159,
+ "learning_rate": 0.0001354303291111716,
+ "loss": 0.8719,
+ "step": 1153
+ },
+ {
+ "epoch": 1.1543246538088838,
+ "grad_norm": 0.41995370388031006,
+ "learning_rate": 0.0001353322849946797,
+ "loss": 0.9429,
+ "step": 1154
+ },
+ {
+ "epoch": 1.1553249351380075,
+ "grad_norm": 0.5358927249908447,
+ "learning_rate": 0.00013523420205439646,
+ "loss": 1.0724,
+ "step": 1155
+ },
+ {
+ "epoch": 1.1563252164671314,
+ "grad_norm": 0.48797738552093506,
+ "learning_rate": 0.0001351360803980972,
+ "loss": 1.0191,
+ "step": 1156
+ },
+ {
+ "epoch": 1.1573254977962553,
+ "grad_norm": 0.46079760789871216,
+ "learning_rate": 0.00013503792013359997,
+ "loss": 0.8731,
+ "step": 1157
+ },
+ {
+ "epoch": 1.158325779125379,
+ "grad_norm": 0.5278632044792175,
+ "learning_rate": 0.00013493972136876509,
+ "loss": 1.0689,
+ "step": 1158
+ },
+ {
+ "epoch": 1.1593260604545028,
+ "grad_norm": 0.6085927486419678,
+ "learning_rate": 0.00013484148421149527,
+ "loss": 1.0228,
+ "step": 1159
+ },
+ {
+ "epoch": 1.1603263417836267,
+ "grad_norm": 0.49424564838409424,
+ "learning_rate": 0.0001347432087697354,
+ "loss": 0.9622,
+ "step": 1160
+ },
+ {
+ "epoch": 1.1613266231127504,
+ "grad_norm": 0.4577535092830658,
+ "learning_rate": 0.00013464489515147238,
+ "loss": 0.795,
+ "step": 1161
+ },
+ {
+ "epoch": 1.1623269044418743,
+ "grad_norm": 0.5331981778144836,
+ "learning_rate": 0.0001345465434647351,
+ "loss": 1.2866,
+ "step": 1162
+ },
+ {
+ "epoch": 1.1633271857709981,
+ "grad_norm": 0.4657655954360962,
+ "learning_rate": 0.00013444815381759425,
+ "loss": 0.8171,
+ "step": 1163
+ },
+ {
+ "epoch": 1.1643274671001218,
+ "grad_norm": 0.44027647376060486,
+ "learning_rate": 0.00013434972631816235,
+ "loss": 0.9448,
+ "step": 1164
+ },
+ {
+ "epoch": 1.1653277484292457,
+ "grad_norm": 1.996617317199707,
+ "learning_rate": 0.0001342512610745933,
+ "loss": 0.8706,
+ "step": 1165
+ },
+ {
+ "epoch": 1.1663280297583696,
+ "grad_norm": 0.4826609790325165,
+ "learning_rate": 0.0001341527581950827,
+ "loss": 1.1075,
+ "step": 1166
+ },
+ {
+ "epoch": 1.1673283110874935,
+ "grad_norm": 0.4908469617366791,
+ "learning_rate": 0.00013405421778786737,
+ "loss": 0.835,
+ "step": 1167
+ },
+ {
+ "epoch": 1.1683285924166171,
+ "grad_norm": 0.5113404989242554,
+ "learning_rate": 0.00013395563996122537,
+ "loss": 0.8437,
+ "step": 1168
+ },
+ {
+ "epoch": 1.169328873745741,
+ "grad_norm": 0.5029433369636536,
+ "learning_rate": 0.00013385702482347593,
+ "loss": 1.1188,
+ "step": 1169
+ },
+ {
+ "epoch": 1.170329155074865,
+ "grad_norm": 0.4739987552165985,
+ "learning_rate": 0.00013375837248297926,
+ "loss": 0.9829,
+ "step": 1170
+ },
+ {
+ "epoch": 1.1713294364039886,
+ "grad_norm": 0.5853392481803894,
+ "learning_rate": 0.0001336596830481364,
+ "loss": 1.0384,
+ "step": 1171
+ },
+ {
+ "epoch": 1.1723297177331125,
+ "grad_norm": 0.5038638710975647,
+ "learning_rate": 0.0001335609566273892,
+ "loss": 0.9389,
+ "step": 1172
+ },
+ {
+ "epoch": 1.1733299990622363,
+ "grad_norm": 0.4367244243621826,
+ "learning_rate": 0.00013346219332922016,
+ "loss": 0.8182,
+ "step": 1173
+ },
+ {
+ "epoch": 1.17433028039136,
+ "grad_norm": 0.4453211724758148,
+ "learning_rate": 0.00013336339326215228,
+ "loss": 0.9289,
+ "step": 1174
+ },
+ {
+ "epoch": 1.175330561720484,
+ "grad_norm": 0.49941959977149963,
+ "learning_rate": 0.00013326455653474897,
+ "loss": 1.1277,
+ "step": 1175
+ },
+ {
+ "epoch": 1.1763308430496078,
+ "grad_norm": 0.553996205329895,
+ "learning_rate": 0.00013316568325561393,
+ "loss": 0.8582,
+ "step": 1176
+ },
+ {
+ "epoch": 1.1773311243787314,
+ "grad_norm": 0.5424408316612244,
+ "learning_rate": 0.00013306677353339098,
+ "loss": 1.0046,
+ "step": 1177
+ },
+ {
+ "epoch": 1.1783314057078553,
+ "grad_norm": 0.4373432695865631,
+ "learning_rate": 0.000132967827476764,
+ "loss": 0.9554,
+ "step": 1178
+ },
+ {
+ "epoch": 1.1793316870369792,
+ "grad_norm": 0.4744022786617279,
+ "learning_rate": 0.0001328688451944569,
+ "loss": 0.7784,
+ "step": 1179
+ },
+ {
+ "epoch": 1.1803319683661029,
+ "grad_norm": 0.5251059532165527,
+ "learning_rate": 0.00013276982679523322,
+ "loss": 0.8857,
+ "step": 1180
+ },
+ {
+ "epoch": 1.1813322496952268,
+ "grad_norm": 0.5108295679092407,
+ "learning_rate": 0.00013267077238789633,
+ "loss": 1.0711,
+ "step": 1181
+ },
+ {
+ "epoch": 1.1823325310243507,
+ "grad_norm": 0.49973955750465393,
+ "learning_rate": 0.00013257168208128908,
+ "loss": 1.0047,
+ "step": 1182
+ },
+ {
+ "epoch": 1.1833328123534743,
+ "grad_norm": 0.5143113732337952,
+ "learning_rate": 0.00013247255598429378,
+ "loss": 0.9294,
+ "step": 1183
+ },
+ {
+ "epoch": 1.1843330936825982,
+ "grad_norm": 0.5185163617134094,
+ "learning_rate": 0.00013237339420583212,
+ "loss": 0.9491,
+ "step": 1184
+ },
+ {
+ "epoch": 1.185333375011722,
+ "grad_norm": 0.49349021911621094,
+ "learning_rate": 0.00013227419685486492,
+ "loss": 0.812,
+ "step": 1185
+ },
+ {
+ "epoch": 1.186333656340846,
+ "grad_norm": 0.5210988521575928,
+ "learning_rate": 0.00013217496404039218,
+ "loss": 1.1228,
+ "step": 1186
+ },
+ {
+ "epoch": 1.1873339376699696,
+ "grad_norm": 0.46139585971832275,
+ "learning_rate": 0.0001320756958714528,
+ "loss": 0.7623,
+ "step": 1187
+ },
+ {
+ "epoch": 1.1883342189990935,
+ "grad_norm": 0.5365749597549438,
+ "learning_rate": 0.00013197639245712454,
+ "loss": 1.0785,
+ "step": 1188
+ },
+ {
+ "epoch": 1.1893345003282172,
+ "grad_norm": 0.4624418616294861,
+ "learning_rate": 0.00013187705390652388,
+ "loss": 1.0245,
+ "step": 1189
+ },
+ {
+ "epoch": 1.190334781657341,
+ "grad_norm": 0.4919735789299011,
+ "learning_rate": 0.00013177768032880593,
+ "loss": 0.9078,
+ "step": 1190
+ },
+ {
+ "epoch": 1.191335062986465,
+ "grad_norm": 0.5049088597297668,
+ "learning_rate": 0.0001316782718331643,
+ "loss": 0.8884,
+ "step": 1191
+ },
+ {
+ "epoch": 1.1923353443155889,
+ "grad_norm": 0.47496137022972107,
+ "learning_rate": 0.0001315788285288309,
+ "loss": 0.9414,
+ "step": 1192
+ },
+ {
+ "epoch": 1.1933356256447125,
+ "grad_norm": 0.4913059175014496,
+ "learning_rate": 0.00013147935052507597,
+ "loss": 0.8762,
+ "step": 1193
+ },
+ {
+ "epoch": 1.1943359069738364,
+ "grad_norm": 0.5643580555915833,
+ "learning_rate": 0.00013137983793120786,
+ "loss": 0.9556,
+ "step": 1194
+ },
+ {
+ "epoch": 1.1953361883029603,
+ "grad_norm": 0.5032216310501099,
+ "learning_rate": 0.0001312802908565729,
+ "loss": 1.1547,
+ "step": 1195
+ },
+ {
+ "epoch": 1.196336469632084,
+ "grad_norm": 0.5721387267112732,
+ "learning_rate": 0.0001311807094105553,
+ "loss": 0.97,
+ "step": 1196
+ },
+ {
+ "epoch": 1.1973367509612078,
+ "grad_norm": 0.47524675726890564,
+ "learning_rate": 0.00013108109370257712,
+ "loss": 0.9953,
+ "step": 1197
+ },
+ {
+ "epoch": 1.1983370322903317,
+ "grad_norm": 0.5769131183624268,
+ "learning_rate": 0.00013098144384209796,
+ "loss": 1.0578,
+ "step": 1198
+ },
+ {
+ "epoch": 1.1993373136194554,
+ "grad_norm": 0.4861721694469452,
+ "learning_rate": 0.000130881759938615,
+ "loss": 0.7542,
+ "step": 1199
+ },
+ {
+ "epoch": 1.2003375949485793,
+ "grad_norm": 0.4798511266708374,
+ "learning_rate": 0.00013078204210166278,
+ "loss": 0.9024,
+ "step": 1200
+ },
+ {
+ "epoch": 1.2013378762777032,
+ "grad_norm": 0.4447210729122162,
+ "learning_rate": 0.00013068229044081324,
+ "loss": 0.9703,
+ "step": 1201
+ },
+ {
+ "epoch": 1.2023381576068268,
+ "grad_norm": 0.5221365690231323,
+ "learning_rate": 0.0001305825050656754,
+ "loss": 1.0575,
+ "step": 1202
+ },
+ {
+ "epoch": 1.2033384389359507,
+ "grad_norm": 0.44786536693573,
+ "learning_rate": 0.00013048268608589533,
+ "loss": 0.9047,
+ "step": 1203
+ },
+ {
+ "epoch": 1.2043387202650746,
+ "grad_norm": 0.44534093141555786,
+ "learning_rate": 0.00013038283361115603,
+ "loss": 0.9156,
+ "step": 1204
+ },
+ {
+ "epoch": 1.2053390015941985,
+ "grad_norm": 0.5345563292503357,
+ "learning_rate": 0.0001302829477511773,
+ "loss": 0.9933,
+ "step": 1205
+ },
+ {
+ "epoch": 1.2063392829233222,
+ "grad_norm": 0.49175193905830383,
+ "learning_rate": 0.0001301830286157157,
+ "loss": 0.84,
+ "step": 1206
+ },
+ {
+ "epoch": 1.207339564252446,
+ "grad_norm": 0.5271350145339966,
+ "learning_rate": 0.0001300830763145642,
+ "loss": 0.8739,
+ "step": 1207
+ },
+ {
+ "epoch": 1.2083398455815697,
+ "grad_norm": 0.4891369342803955,
+ "learning_rate": 0.00012998309095755235,
+ "loss": 0.9923,
+ "step": 1208
+ },
+ {
+ "epoch": 1.2093401269106936,
+ "grad_norm": 0.44362354278564453,
+ "learning_rate": 0.00012988307265454597,
+ "loss": 0.911,
+ "step": 1209
+ },
+ {
+ "epoch": 1.2103404082398175,
+ "grad_norm": 0.46026211977005005,
+ "learning_rate": 0.0001297830215154471,
+ "loss": 0.8749,
+ "step": 1210
+ },
+ {
+ "epoch": 1.2113406895689414,
+ "grad_norm": 0.49236229062080383,
+ "learning_rate": 0.00012968293765019384,
+ "loss": 0.8959,
+ "step": 1211
+ },
+ {
+ "epoch": 1.212340970898065,
+ "grad_norm": 0.5326531529426575,
+ "learning_rate": 0.00012958282116876026,
+ "loss": 1.0464,
+ "step": 1212
+ },
+ {
+ "epoch": 1.213341252227189,
+ "grad_norm": 0.4658203721046448,
+ "learning_rate": 0.00012948267218115624,
+ "loss": 0.8895,
+ "step": 1213
+ },
+ {
+ "epoch": 1.2143415335563128,
+ "grad_norm": 0.5042040348052979,
+ "learning_rate": 0.00012938249079742743,
+ "loss": 0.889,
+ "step": 1214
+ },
+ {
+ "epoch": 1.2153418148854365,
+ "grad_norm": 0.5408799648284912,
+ "learning_rate": 0.00012928227712765504,
+ "loss": 0.9974,
+ "step": 1215
+ },
+ {
+ "epoch": 1.2163420962145604,
+ "grad_norm": 0.7056695818901062,
+ "learning_rate": 0.0001291820312819558,
+ "loss": 0.8644,
+ "step": 1216
+ },
+ {
+ "epoch": 1.2173423775436842,
+ "grad_norm": 0.5424172878265381,
+ "learning_rate": 0.00012908175337048174,
+ "loss": 1.0855,
+ "step": 1217
+ },
+ {
+ "epoch": 1.218342658872808,
+ "grad_norm": 0.4773527681827545,
+ "learning_rate": 0.00012898144350342015,
+ "loss": 1.014,
+ "step": 1218
+ },
+ {
+ "epoch": 1.2193429402019318,
+ "grad_norm": 0.5538880228996277,
+ "learning_rate": 0.0001288811017909934,
+ "loss": 1.0491,
+ "step": 1219
+ },
+ {
+ "epoch": 1.2203432215310557,
+ "grad_norm": 0.4497896730899811,
+ "learning_rate": 0.00012878072834345895,
+ "loss": 0.8591,
+ "step": 1220
+ },
+ {
+ "epoch": 1.2213435028601793,
+ "grad_norm": 0.5487242341041565,
+ "learning_rate": 0.00012868032327110904,
+ "loss": 0.9809,
+ "step": 1221
+ },
+ {
+ "epoch": 1.2223437841893032,
+ "grad_norm": 0.5900948643684387,
+ "learning_rate": 0.00012857988668427066,
+ "loss": 1.1435,
+ "step": 1222
+ },
+ {
+ "epoch": 1.2233440655184271,
+ "grad_norm": 0.5471523404121399,
+ "learning_rate": 0.0001284794186933055,
+ "loss": 1.0088,
+ "step": 1223
+ },
+ {
+ "epoch": 1.2243443468475508,
+ "grad_norm": 0.4625445604324341,
+ "learning_rate": 0.00012837891940860972,
+ "loss": 1.0452,
+ "step": 1224
+ },
+ {
+ "epoch": 1.2253446281766747,
+ "grad_norm": 0.4972693920135498,
+ "learning_rate": 0.00012827838894061377,
+ "loss": 1.0403,
+ "step": 1225
+ },
+ {
+ "epoch": 1.2263449095057986,
+ "grad_norm": 0.4823111295700073,
+ "learning_rate": 0.00012817782739978255,
+ "loss": 0.9439,
+ "step": 1226
+ },
+ {
+ "epoch": 1.2273451908349222,
+ "grad_norm": 0.5163894295692444,
+ "learning_rate": 0.00012807723489661495,
+ "loss": 1.031,
+ "step": 1227
+ },
+ {
+ "epoch": 1.228345472164046,
+ "grad_norm": 0.5085253119468689,
+ "learning_rate": 0.00012797661154164395,
+ "loss": 0.998,
+ "step": 1228
+ },
+ {
+ "epoch": 1.22934575349317,
+ "grad_norm": 0.4469011425971985,
+ "learning_rate": 0.00012787595744543647,
+ "loss": 0.8943,
+ "step": 1229
+ },
+ {
+ "epoch": 1.2303460348222939,
+ "grad_norm": 0.5117391347885132,
+ "learning_rate": 0.00012777527271859307,
+ "loss": 0.9817,
+ "step": 1230
+ },
+ {
+ "epoch": 1.2313463161514175,
+ "grad_norm": 0.44259950518608093,
+ "learning_rate": 0.0001276745574717481,
+ "loss": 0.7659,
+ "step": 1231
+ },
+ {
+ "epoch": 1.2323465974805414,
+ "grad_norm": 0.42978596687316895,
+ "learning_rate": 0.00012757381181556943,
+ "loss": 0.7313,
+ "step": 1232
+ },
+ {
+ "epoch": 1.2333468788096653,
+ "grad_norm": 0.5619105696678162,
+ "learning_rate": 0.0001274730358607583,
+ "loss": 0.9881,
+ "step": 1233
+ },
+ {
+ "epoch": 1.234347160138789,
+ "grad_norm": 0.5065141916275024,
+ "learning_rate": 0.00012737222971804924,
+ "loss": 0.9789,
+ "step": 1234
+ },
+ {
+ "epoch": 1.2353474414679129,
+ "grad_norm": 0.514705240726471,
+ "learning_rate": 0.00012727139349821,
+ "loss": 0.9278,
+ "step": 1235
+ },
+ {
+ "epoch": 1.2363477227970368,
+ "grad_norm": 0.48272448778152466,
+ "learning_rate": 0.0001271705273120413,
+ "loss": 0.9011,
+ "step": 1236
+ },
+ {
+ "epoch": 1.2373480041261604,
+ "grad_norm": 0.4993284344673157,
+ "learning_rate": 0.00012706963127037685,
+ "loss": 0.8341,
+ "step": 1237
+ },
+ {
+ "epoch": 1.2383482854552843,
+ "grad_norm": 0.44701850414276123,
+ "learning_rate": 0.00012696870548408316,
+ "loss": 0.8481,
+ "step": 1238
+ },
+ {
+ "epoch": 1.2393485667844082,
+ "grad_norm": 0.5611200332641602,
+ "learning_rate": 0.00012686775006405946,
+ "loss": 1.101,
+ "step": 1239
+ },
+ {
+ "epoch": 1.2403488481135319,
+ "grad_norm": 0.4962129592895508,
+ "learning_rate": 0.00012676676512123747,
+ "loss": 0.951,
+ "step": 1240
+ },
+ {
+ "epoch": 1.2413491294426557,
+ "grad_norm": 0.5547065734863281,
+ "learning_rate": 0.00012666575076658134,
+ "loss": 1.0228,
+ "step": 1241
+ },
+ {
+ "epoch": 1.2423494107717796,
+ "grad_norm": 0.5761319398880005,
+ "learning_rate": 0.00012656470711108764,
+ "loss": 1.0631,
+ "step": 1242
+ },
+ {
+ "epoch": 1.2433496921009033,
+ "grad_norm": 0.5202417969703674,
+ "learning_rate": 0.00012646363426578505,
+ "loss": 0.9623,
+ "step": 1243
+ },
+ {
+ "epoch": 1.2443499734300272,
+ "grad_norm": 0.561244547367096,
+ "learning_rate": 0.0001263625323417343,
+ "loss": 1.1666,
+ "step": 1244
+ },
+ {
+ "epoch": 1.245350254759151,
+ "grad_norm": 0.43389594554901123,
+ "learning_rate": 0.0001262614014500282,
+ "loss": 0.9473,
+ "step": 1245
+ },
+ {
+ "epoch": 1.2463505360882747,
+ "grad_norm": 0.5219054222106934,
+ "learning_rate": 0.00012616024170179126,
+ "loss": 1.0181,
+ "step": 1246
+ },
+ {
+ "epoch": 1.2473508174173986,
+ "grad_norm": 0.5179515480995178,
+ "learning_rate": 0.00012605905320817976,
+ "loss": 1.0851,
+ "step": 1247
+ },
+ {
+ "epoch": 1.2483510987465225,
+ "grad_norm": 0.5104801058769226,
+ "learning_rate": 0.00012595783608038155,
+ "loss": 0.9239,
+ "step": 1248
+ },
+ {
+ "epoch": 1.2493513800756464,
+ "grad_norm": 0.46918627619743347,
+ "learning_rate": 0.00012585659042961596,
+ "loss": 0.8361,
+ "step": 1249
+ },
+ {
+ "epoch": 1.25035166140477,
+ "grad_norm": 0.5275365710258484,
+ "learning_rate": 0.00012575531636713368,
+ "loss": 0.9256,
+ "step": 1250
+ },
+ {
+ "epoch": 1.251351942733894,
+ "grad_norm": 0.5006279349327087,
+ "learning_rate": 0.00012565401400421651,
+ "loss": 0.8748,
+ "step": 1251
+ },
+ {
+ "epoch": 1.2523522240630176,
+ "grad_norm": 0.466467022895813,
+ "learning_rate": 0.0001255526834521775,
+ "loss": 0.9217,
+ "step": 1252
+ },
+ {
+ "epoch": 1.2533525053921415,
+ "grad_norm": 0.45304587483406067,
+ "learning_rate": 0.00012545132482236055,
+ "loss": 0.8776,
+ "step": 1253
+ },
+ {
+ "epoch": 1.2543527867212654,
+ "grad_norm": 0.483394980430603,
+ "learning_rate": 0.0001253499382261405,
+ "loss": 0.9421,
+ "step": 1254
+ },
+ {
+ "epoch": 1.2553530680503893,
+ "grad_norm": 0.5117647051811218,
+ "learning_rate": 0.00012524852377492285,
+ "loss": 1.0033,
+ "step": 1255
+ },
+ {
+ "epoch": 1.256353349379513,
+ "grad_norm": 0.5712929964065552,
+ "learning_rate": 0.00012514708158014378,
+ "loss": 1.0216,
+ "step": 1256
+ },
+ {
+ "epoch": 1.2573536307086368,
+ "grad_norm": 0.49368858337402344,
+ "learning_rate": 0.00012504561175326985,
+ "loss": 0.8836,
+ "step": 1257
+ },
+ {
+ "epoch": 1.2583539120377607,
+ "grad_norm": 0.5303272008895874,
+ "learning_rate": 0.00012494411440579814,
+ "loss": 1.0138,
+ "step": 1258
+ },
+ {
+ "epoch": 1.2593541933668844,
+ "grad_norm": 0.47034743428230286,
+ "learning_rate": 0.0001248425896492558,
+ "loss": 0.9346,
+ "step": 1259
+ },
+ {
+ "epoch": 1.2603544746960083,
+ "grad_norm": 0.5398191809654236,
+ "learning_rate": 0.00012474103759520027,
+ "loss": 1.2548,
+ "step": 1260
+ },
+ {
+ "epoch": 1.2613547560251321,
+ "grad_norm": 0.4403116703033447,
+ "learning_rate": 0.00012463945835521878,
+ "loss": 0.8063,
+ "step": 1261
+ },
+ {
+ "epoch": 1.2623550373542558,
+ "grad_norm": 0.5504721999168396,
+ "learning_rate": 0.0001245378520409286,
+ "loss": 1.0888,
+ "step": 1262
+ },
+ {
+ "epoch": 1.2633553186833797,
+ "grad_norm": 0.46984589099884033,
+ "learning_rate": 0.0001244362187639767,
+ "loss": 0.9062,
+ "step": 1263
+ },
+ {
+ "epoch": 1.2643556000125036,
+ "grad_norm": 0.5573250651359558,
+ "learning_rate": 0.00012433455863603967,
+ "loss": 0.9474,
+ "step": 1264
+ },
+ {
+ "epoch": 1.2653558813416272,
+ "grad_norm": 0.5468732714653015,
+ "learning_rate": 0.00012423287176882358,
+ "loss": 0.9424,
+ "step": 1265
+ },
+ {
+ "epoch": 1.2663561626707511,
+ "grad_norm": 0.4921899437904358,
+ "learning_rate": 0.00012413115827406392,
+ "loss": 0.8568,
+ "step": 1266
+ },
+ {
+ "epoch": 1.267356443999875,
+ "grad_norm": 0.48769402503967285,
+ "learning_rate": 0.00012402941826352546,
+ "loss": 0.7579,
+ "step": 1267
+ },
+ {
+ "epoch": 1.268356725328999,
+ "grad_norm": 0.5462141633033752,
+ "learning_rate": 0.00012392765184900202,
+ "loss": 0.9946,
+ "step": 1268
+ },
+ {
+ "epoch": 1.2693570066581226,
+ "grad_norm": 0.5021050572395325,
+ "learning_rate": 0.0001238258591423165,
+ "loss": 0.8603,
+ "step": 1269
+ },
+ {
+ "epoch": 1.2703572879872465,
+ "grad_norm": 0.5272159576416016,
+ "learning_rate": 0.00012372404025532072,
+ "loss": 0.94,
+ "step": 1270
+ },
+ {
+ "epoch": 1.2713575693163701,
+ "grad_norm": 0.5332500338554382,
+ "learning_rate": 0.00012362219529989514,
+ "loss": 1.1609,
+ "step": 1271
+ },
+ {
+ "epoch": 1.272357850645494,
+ "grad_norm": 0.5058136582374573,
+ "learning_rate": 0.00012352032438794902,
+ "loss": 1.0013,
+ "step": 1272
+ },
+ {
+ "epoch": 1.273358131974618,
+ "grad_norm": 0.5055596828460693,
+ "learning_rate": 0.00012341842763142005,
+ "loss": 1.0121,
+ "step": 1273
+ },
+ {
+ "epoch": 1.2743584133037418,
+ "grad_norm": 0.5699402689933777,
+ "learning_rate": 0.00012331650514227425,
+ "loss": 1.1188,
+ "step": 1274
+ },
+ {
+ "epoch": 1.2753586946328654,
+ "grad_norm": 0.511233925819397,
+ "learning_rate": 0.00012321455703250616,
+ "loss": 1.0291,
+ "step": 1275
+ },
+ {
+ "epoch": 1.2763589759619893,
+ "grad_norm": 0.5304299592971802,
+ "learning_rate": 0.00012311258341413822,
+ "loss": 0.9619,
+ "step": 1276
+ },
+ {
+ "epoch": 1.277359257291113,
+ "grad_norm": 0.5318915247917175,
+ "learning_rate": 0.00012301058439922102,
+ "loss": 0.9669,
+ "step": 1277
+ },
+ {
+ "epoch": 1.2783595386202369,
+ "grad_norm": 0.510267436504364,
+ "learning_rate": 0.000122908560099833,
+ "loss": 1.0956,
+ "step": 1278
+ },
+ {
+ "epoch": 1.2793598199493608,
+ "grad_norm": 0.530360758304596,
+ "learning_rate": 0.00012280651062808047,
+ "loss": 1.02,
+ "step": 1279
+ },
+ {
+ "epoch": 1.2803601012784847,
+ "grad_norm": 0.5094459056854248,
+ "learning_rate": 0.00012270443609609729,
+ "loss": 0.9614,
+ "step": 1280
+ },
+ {
+ "epoch": 1.2813603826076083,
+ "grad_norm": 0.4430864453315735,
+ "learning_rate": 0.0001226023366160449,
+ "loss": 0.8188,
+ "step": 1281
+ },
+ {
+ "epoch": 1.2823606639367322,
+ "grad_norm": 0.4705411493778229,
+ "learning_rate": 0.00012250021230011225,
+ "loss": 0.8952,
+ "step": 1282
+ },
+ {
+ "epoch": 1.283360945265856,
+ "grad_norm": 0.5231715440750122,
+ "learning_rate": 0.00012239806326051539,
+ "loss": 0.941,
+ "step": 1283
+ },
+ {
+ "epoch": 1.2843612265949798,
+ "grad_norm": 0.5658493041992188,
+ "learning_rate": 0.00012229588960949771,
+ "loss": 1.0047,
+ "step": 1284
+ },
+ {
+ "epoch": 1.2853615079241036,
+ "grad_norm": 0.6016567349433899,
+ "learning_rate": 0.00012219369145932959,
+ "loss": 1.1764,
+ "step": 1285
+ },
+ {
+ "epoch": 1.2863617892532275,
+ "grad_norm": 0.6365408301353455,
+ "learning_rate": 0.00012209146892230822,
+ "loss": 0.9777,
+ "step": 1286
+ },
+ {
+ "epoch": 1.2873620705823514,
+ "grad_norm": 0.46536219120025635,
+ "learning_rate": 0.00012198922211075778,
+ "loss": 0.9826,
+ "step": 1287
+ },
+ {
+ "epoch": 1.288362351911475,
+ "grad_norm": 0.5130245089530945,
+ "learning_rate": 0.00012188695113702896,
+ "loss": 1.0255,
+ "step": 1288
+ },
+ {
+ "epoch": 1.289362633240599,
+ "grad_norm": 0.5321043133735657,
+ "learning_rate": 0.00012178465611349911,
+ "loss": 0.9973,
+ "step": 1289
+ },
+ {
+ "epoch": 1.2903629145697226,
+ "grad_norm": 0.48580724000930786,
+ "learning_rate": 0.00012168233715257194,
+ "loss": 0.8768,
+ "step": 1290
+ },
+ {
+ "epoch": 1.2913631958988465,
+ "grad_norm": 0.5140405297279358,
+ "learning_rate": 0.00012157999436667747,
+ "loss": 0.8985,
+ "step": 1291
+ },
+ {
+ "epoch": 1.2923634772279704,
+ "grad_norm": 0.4582030773162842,
+ "learning_rate": 0.00012147762786827193,
+ "loss": 0.9693,
+ "step": 1292
+ },
+ {
+ "epoch": 1.2933637585570943,
+ "grad_norm": 0.47397539019584656,
+ "learning_rate": 0.00012137523776983757,
+ "loss": 0.8348,
+ "step": 1293
+ },
+ {
+ "epoch": 1.294364039886218,
+ "grad_norm": 0.43932002782821655,
+ "learning_rate": 0.00012127282418388264,
+ "loss": 0.851,
+ "step": 1294
+ },
+ {
+ "epoch": 1.2953643212153418,
+ "grad_norm": 0.5559205412864685,
+ "learning_rate": 0.0001211703872229411,
+ "loss": 0.86,
+ "step": 1295
+ },
+ {
+ "epoch": 1.2963646025444655,
+ "grad_norm": 0.5433980226516724,
+ "learning_rate": 0.00012106792699957263,
+ "loss": 1.1181,
+ "step": 1296
+ },
+ {
+ "epoch": 1.2973648838735894,
+ "grad_norm": 0.5069502592086792,
+ "learning_rate": 0.00012096544362636255,
+ "loss": 0.9613,
+ "step": 1297
+ },
+ {
+ "epoch": 1.2983651652027133,
+ "grad_norm": 0.5588079690933228,
+ "learning_rate": 0.00012086293721592152,
+ "loss": 1.0741,
+ "step": 1298
+ },
+ {
+ "epoch": 1.2993654465318372,
+ "grad_norm": 0.6035181879997253,
+ "learning_rate": 0.00012076040788088554,
+ "loss": 1.0187,
+ "step": 1299
+ },
+ {
+ "epoch": 1.3003657278609608,
+ "grad_norm": 0.4385228455066681,
+ "learning_rate": 0.00012065785573391581,
+ "loss": 0.9293,
+ "step": 1300
+ },
+ {
+ "epoch": 1.3013660091900847,
+ "grad_norm": 0.5284578800201416,
+ "learning_rate": 0.00012055528088769861,
+ "loss": 0.9479,
+ "step": 1301
+ },
+ {
+ "epoch": 1.3023662905192086,
+ "grad_norm": 0.46655789017677307,
+ "learning_rate": 0.00012045268345494511,
+ "loss": 0.8702,
+ "step": 1302
+ },
+ {
+ "epoch": 1.3033665718483323,
+ "grad_norm": 0.5073155164718628,
+ "learning_rate": 0.00012035006354839133,
+ "loss": 0.8667,
+ "step": 1303
+ },
+ {
+ "epoch": 1.3043668531774562,
+ "grad_norm": 0.5954610109329224,
+ "learning_rate": 0.00012024742128079805,
+ "loss": 1.0998,
+ "step": 1304
+ },
+ {
+ "epoch": 1.30536713450658,
+ "grad_norm": 0.46617114543914795,
+ "learning_rate": 0.00012014475676495052,
+ "loss": 0.8853,
+ "step": 1305
+ },
+ {
+ "epoch": 1.306367415835704,
+ "grad_norm": 0.5705167055130005,
+ "learning_rate": 0.00012004207011365849,
+ "loss": 0.9094,
+ "step": 1306
+ },
+ {
+ "epoch": 1.3073676971648276,
+ "grad_norm": 0.4711546301841736,
+ "learning_rate": 0.00011993936143975599,
+ "loss": 0.9597,
+ "step": 1307
+ },
+ {
+ "epoch": 1.3083679784939515,
+ "grad_norm": 0.5322745442390442,
+ "learning_rate": 0.00011983663085610131,
+ "loss": 0.9221,
+ "step": 1308
+ },
+ {
+ "epoch": 1.3093682598230751,
+ "grad_norm": 0.4769452214241028,
+ "learning_rate": 0.00011973387847557676,
+ "loss": 0.7874,
+ "step": 1309
+ },
+ {
+ "epoch": 1.310368541152199,
+ "grad_norm": 0.5224636793136597,
+ "learning_rate": 0.00011963110441108863,
+ "loss": 0.8233,
+ "step": 1310
+ },
+ {
+ "epoch": 1.311368822481323,
+ "grad_norm": 0.5125696063041687,
+ "learning_rate": 0.000119528308775567,
+ "loss": 0.9894,
+ "step": 1311
+ },
+ {
+ "epoch": 1.3123691038104468,
+ "grad_norm": 0.5573001503944397,
+ "learning_rate": 0.00011942549168196575,
+ "loss": 0.9043,
+ "step": 1312
+ },
+ {
+ "epoch": 1.3133693851395705,
+ "grad_norm": 0.5493408441543579,
+ "learning_rate": 0.00011932265324326221,
+ "loss": 0.964,
+ "step": 1313
+ },
+ {
+ "epoch": 1.3143696664686944,
+ "grad_norm": 0.5327842235565186,
+ "learning_rate": 0.0001192197935724573,
+ "loss": 0.9196,
+ "step": 1314
+ },
+ {
+ "epoch": 1.315369947797818,
+ "grad_norm": 0.5743328332901001,
+ "learning_rate": 0.00011911691278257511,
+ "loss": 1.0504,
+ "step": 1315
+ },
+ {
+ "epoch": 1.316370229126942,
+ "grad_norm": 0.446932315826416,
+ "learning_rate": 0.0001190140109866631,
+ "loss": 0.8425,
+ "step": 1316
+ },
+ {
+ "epoch": 1.3173705104560658,
+ "grad_norm": 0.47306087613105774,
+ "learning_rate": 0.00011891108829779165,
+ "loss": 0.8726,
+ "step": 1317
+ },
+ {
+ "epoch": 1.3183707917851897,
+ "grad_norm": 0.566939115524292,
+ "learning_rate": 0.00011880814482905422,
+ "loss": 0.8747,
+ "step": 1318
+ },
+ {
+ "epoch": 1.3193710731143133,
+ "grad_norm": 0.5145870447158813,
+ "learning_rate": 0.00011870518069356709,
+ "loss": 0.9383,
+ "step": 1319
+ },
+ {
+ "epoch": 1.3203713544434372,
+ "grad_norm": 0.5228437185287476,
+ "learning_rate": 0.0001186021960044692,
+ "loss": 1.103,
+ "step": 1320
+ },
+ {
+ "epoch": 1.3213716357725611,
+ "grad_norm": 0.4844512939453125,
+ "learning_rate": 0.00011849919087492211,
+ "loss": 0.98,
+ "step": 1321
+ },
+ {
+ "epoch": 1.3223719171016848,
+ "grad_norm": 0.5099167227745056,
+ "learning_rate": 0.00011839616541810983,
+ "loss": 0.9023,
+ "step": 1322
+ },
+ {
+ "epoch": 1.3233721984308087,
+ "grad_norm": 0.4702555537223816,
+ "learning_rate": 0.00011829311974723867,
+ "loss": 0.8553,
+ "step": 1323
+ },
+ {
+ "epoch": 1.3243724797599326,
+ "grad_norm": 0.5219053030014038,
+ "learning_rate": 0.00011819005397553723,
+ "loss": 0.9446,
+ "step": 1324
+ },
+ {
+ "epoch": 1.3253727610890562,
+ "grad_norm": 0.48462843894958496,
+ "learning_rate": 0.00011808696821625613,
+ "loss": 0.9591,
+ "step": 1325
+ },
+ {
+ "epoch": 1.32637304241818,
+ "grad_norm": 0.5187227725982666,
+ "learning_rate": 0.000117983862582668,
+ "loss": 0.9413,
+ "step": 1326
+ },
+ {
+ "epoch": 1.327373323747304,
+ "grad_norm": 0.47444605827331543,
+ "learning_rate": 0.00011788073718806725,
+ "loss": 0.8979,
+ "step": 1327
+ },
+ {
+ "epoch": 1.3283736050764277,
+ "grad_norm": 0.5251137018203735,
+ "learning_rate": 0.00011777759214577006,
+ "loss": 1.0449,
+ "step": 1328
+ },
+ {
+ "epoch": 1.3293738864055515,
+ "grad_norm": 0.5007866024971008,
+ "learning_rate": 0.00011767442756911417,
+ "loss": 0.9907,
+ "step": 1329
+ },
+ {
+ "epoch": 1.3303741677346754,
+ "grad_norm": 0.8486194610595703,
+ "learning_rate": 0.00011757124357145881,
+ "loss": 1.0459,
+ "step": 1330
+ },
+ {
+ "epoch": 1.3313744490637993,
+ "grad_norm": 0.5153964161872864,
+ "learning_rate": 0.00011746804026618452,
+ "loss": 0.9911,
+ "step": 1331
+ },
+ {
+ "epoch": 1.332374730392923,
+ "grad_norm": 0.523077666759491,
+ "learning_rate": 0.00011736481776669306,
+ "loss": 1.0571,
+ "step": 1332
+ },
+ {
+ "epoch": 1.3333750117220469,
+ "grad_norm": 0.5242265462875366,
+ "learning_rate": 0.00011726157618640728,
+ "loss": 0.9057,
+ "step": 1333
+ },
+ {
+ "epoch": 1.3343752930511705,
+ "grad_norm": 0.524046778678894,
+ "learning_rate": 0.00011715831563877104,
+ "loss": 1.0413,
+ "step": 1334
+ },
+ {
+ "epoch": 1.3353755743802944,
+ "grad_norm": 0.5873232483863831,
+ "learning_rate": 0.00011705503623724898,
+ "loss": 1.1105,
+ "step": 1335
+ },
+ {
+ "epoch": 1.3363758557094183,
+ "grad_norm": 0.5559434294700623,
+ "learning_rate": 0.00011695173809532652,
+ "loss": 0.9045,
+ "step": 1336
+ },
+ {
+ "epoch": 1.3373761370385422,
+ "grad_norm": 0.5970155000686646,
+ "learning_rate": 0.00011684842132650957,
+ "loss": 1.1663,
+ "step": 1337
+ },
+ {
+ "epoch": 1.3383764183676659,
+ "grad_norm": 0.5005142092704773,
+ "learning_rate": 0.00011674508604432464,
+ "loss": 1.0695,
+ "step": 1338
+ },
+ {
+ "epoch": 1.3393766996967897,
+ "grad_norm": 0.49226582050323486,
+ "learning_rate": 0.00011664173236231848,
+ "loss": 1.0875,
+ "step": 1339
+ },
+ {
+ "epoch": 1.3403769810259134,
+ "grad_norm": 0.4792287349700928,
+ "learning_rate": 0.0001165383603940581,
+ "loss": 0.9102,
+ "step": 1340
+ },
+ {
+ "epoch": 1.3413772623550373,
+ "grad_norm": 0.4332147538661957,
+ "learning_rate": 0.00011643497025313061,
+ "loss": 0.8948,
+ "step": 1341
+ },
+ {
+ "epoch": 1.3423775436841612,
+ "grad_norm": 0.45502984523773193,
+ "learning_rate": 0.00011633156205314309,
+ "loss": 0.8538,
+ "step": 1342
+ },
+ {
+ "epoch": 1.343377825013285,
+ "grad_norm": 0.5594006776809692,
+ "learning_rate": 0.00011622813590772244,
+ "loss": 1.0178,
+ "step": 1343
+ },
+ {
+ "epoch": 1.3443781063424087,
+ "grad_norm": 0.4428876042366028,
+ "learning_rate": 0.00011612469193051525,
+ "loss": 0.856,
+ "step": 1344
+ },
+ {
+ "epoch": 1.3453783876715326,
+ "grad_norm": 0.4615425169467926,
+ "learning_rate": 0.00011602123023518779,
+ "loss": 0.8568,
+ "step": 1345
+ },
+ {
+ "epoch": 1.3463786690006565,
+ "grad_norm": 0.543389618396759,
+ "learning_rate": 0.00011591775093542572,
+ "loss": 0.8293,
+ "step": 1346
+ },
+ {
+ "epoch": 1.3473789503297802,
+ "grad_norm": 0.4740433394908905,
+ "learning_rate": 0.0001158142541449341,
+ "loss": 0.9163,
+ "step": 1347
+ },
+ {
+ "epoch": 1.348379231658904,
+ "grad_norm": 0.47938287258148193,
+ "learning_rate": 0.00011571073997743716,
+ "loss": 0.9745,
+ "step": 1348
+ },
+ {
+ "epoch": 1.349379512988028,
+ "grad_norm": 0.47510263323783875,
+ "learning_rate": 0.0001156072085466783,
+ "loss": 0.9536,
+ "step": 1349
+ },
+ {
+ "epoch": 1.3503797943171518,
+ "grad_norm": 0.5921860933303833,
+ "learning_rate": 0.00011550365996641979,
+ "loss": 0.8397,
+ "step": 1350
+ },
+ {
+ "epoch": 1.3513800756462755,
+ "grad_norm": 0.5436375737190247,
+ "learning_rate": 0.00011540009435044281,
+ "loss": 0.9381,
+ "step": 1351
+ },
+ {
+ "epoch": 1.3523803569753994,
+ "grad_norm": 0.4591434597969055,
+ "learning_rate": 0.00011529651181254723,
+ "loss": 1.0771,
+ "step": 1352
+ },
+ {
+ "epoch": 1.353380638304523,
+ "grad_norm": 0.533069372177124,
+ "learning_rate": 0.0001151929124665516,
+ "loss": 0.9103,
+ "step": 1353
+ },
+ {
+ "epoch": 1.354380919633647,
+ "grad_norm": 0.538324773311615,
+ "learning_rate": 0.00011508929642629274,
+ "loss": 1.0469,
+ "step": 1354
+ },
+ {
+ "epoch": 1.3553812009627708,
+ "grad_norm": 0.46198832988739014,
+ "learning_rate": 0.00011498566380562601,
+ "loss": 0.8242,
+ "step": 1355
+ },
+ {
+ "epoch": 1.3563814822918947,
+ "grad_norm": 0.573716402053833,
+ "learning_rate": 0.0001148820147184249,
+ "loss": 0.9437,
+ "step": 1356
+ },
+ {
+ "epoch": 1.3573817636210184,
+ "grad_norm": 0.5638802647590637,
+ "learning_rate": 0.00011477834927858104,
+ "loss": 0.9336,
+ "step": 1357
+ },
+ {
+ "epoch": 1.3583820449501423,
+ "grad_norm": 0.48780402541160583,
+ "learning_rate": 0.00011467466760000399,
+ "loss": 0.8859,
+ "step": 1358
+ },
+ {
+ "epoch": 1.359382326279266,
+ "grad_norm": 0.5441538095474243,
+ "learning_rate": 0.00011457096979662114,
+ "loss": 0.8804,
+ "step": 1359
+ },
+ {
+ "epoch": 1.3603826076083898,
+ "grad_norm": 0.5250831842422485,
+ "learning_rate": 0.00011446725598237767,
+ "loss": 0.9739,
+ "step": 1360
+ },
+ {
+ "epoch": 1.3613828889375137,
+ "grad_norm": 0.49177756905555725,
+ "learning_rate": 0.00011436352627123623,
+ "loss": 0.9586,
+ "step": 1361
+ },
+ {
+ "epoch": 1.3623831702666376,
+ "grad_norm": 0.5866628885269165,
+ "learning_rate": 0.00011425978077717709,
+ "loss": 1.0511,
+ "step": 1362
+ },
+ {
+ "epoch": 1.3633834515957612,
+ "grad_norm": 0.49350351095199585,
+ "learning_rate": 0.00011415601961419775,
+ "loss": 0.9637,
+ "step": 1363
+ },
+ {
+ "epoch": 1.3643837329248851,
+ "grad_norm": 0.5402287244796753,
+ "learning_rate": 0.00011405224289631295,
+ "loss": 1.0008,
+ "step": 1364
+ },
+ {
+ "epoch": 1.365384014254009,
+ "grad_norm": 0.5524907112121582,
+ "learning_rate": 0.00011394845073755455,
+ "loss": 1.0398,
+ "step": 1365
+ },
+ {
+ "epoch": 1.3663842955831327,
+ "grad_norm": 0.49948206543922424,
+ "learning_rate": 0.0001138446432519714,
+ "loss": 0.8577,
+ "step": 1366
+ },
+ {
+ "epoch": 1.3673845769122566,
+ "grad_norm": 0.500592052936554,
+ "learning_rate": 0.00011374082055362909,
+ "loss": 1.0053,
+ "step": 1367
+ },
+ {
+ "epoch": 1.3683848582413805,
+ "grad_norm": 0.4469926357269287,
+ "learning_rate": 0.00011363698275661001,
+ "loss": 0.8081,
+ "step": 1368
+ },
+ {
+ "epoch": 1.3693851395705043,
+ "grad_norm": 0.4939117431640625,
+ "learning_rate": 0.00011353312997501313,
+ "loss": 0.9559,
+ "step": 1369
+ },
+ {
+ "epoch": 1.370385420899628,
+ "grad_norm": 0.5091076493263245,
+ "learning_rate": 0.00011342926232295386,
+ "loss": 0.8962,
+ "step": 1370
+ },
+ {
+ "epoch": 1.371385702228752,
+ "grad_norm": 0.48055970668792725,
+ "learning_rate": 0.00011332537991456398,
+ "loss": 0.8686,
+ "step": 1371
+ },
+ {
+ "epoch": 1.3723859835578756,
+ "grad_norm": 0.4724258482456207,
+ "learning_rate": 0.00011322148286399147,
+ "loss": 0.8872,
+ "step": 1372
+ },
+ {
+ "epoch": 1.3733862648869994,
+ "grad_norm": 0.4945514500141144,
+ "learning_rate": 0.0001131175712854004,
+ "loss": 0.8766,
+ "step": 1373
+ },
+ {
+ "epoch": 1.3743865462161233,
+ "grad_norm": 0.4784204065799713,
+ "learning_rate": 0.00011301364529297079,
+ "loss": 0.8216,
+ "step": 1374
+ },
+ {
+ "epoch": 1.3753868275452472,
+ "grad_norm": 0.4669654667377472,
+ "learning_rate": 0.0001129097050008985,
+ "loss": 0.98,
+ "step": 1375
+ },
+ {
+ "epoch": 1.3763871088743709,
+ "grad_norm": 0.5275737047195435,
+ "learning_rate": 0.00011280575052339514,
+ "loss": 0.9391,
+ "step": 1376
+ },
+ {
+ "epoch": 1.3773873902034948,
+ "grad_norm": 0.47577112913131714,
+ "learning_rate": 0.00011270178197468789,
+ "loss": 0.8956,
+ "step": 1377
+ },
+ {
+ "epoch": 1.3783876715326184,
+ "grad_norm": 0.49086448550224304,
+ "learning_rate": 0.00011259779946901934,
+ "loss": 1.0058,
+ "step": 1378
+ },
+ {
+ "epoch": 1.3793879528617423,
+ "grad_norm": 0.5351247191429138,
+ "learning_rate": 0.0001124938031206475,
+ "loss": 1.0215,
+ "step": 1379
+ },
+ {
+ "epoch": 1.3803882341908662,
+ "grad_norm": 0.5512630343437195,
+ "learning_rate": 0.00011238979304384554,
+ "loss": 1.0254,
+ "step": 1380
+ },
+ {
+ "epoch": 1.38138851551999,
+ "grad_norm": 0.5598354339599609,
+ "learning_rate": 0.0001122857693529017,
+ "loss": 0.8707,
+ "step": 1381
+ },
+ {
+ "epoch": 1.3823887968491138,
+ "grad_norm": 0.5506719946861267,
+ "learning_rate": 0.0001121817321621192,
+ "loss": 0.9061,
+ "step": 1382
+ },
+ {
+ "epoch": 1.3833890781782376,
+ "grad_norm": 0.5244742035865784,
+ "learning_rate": 0.00011207768158581613,
+ "loss": 1.0017,
+ "step": 1383
+ },
+ {
+ "epoch": 1.3843893595073615,
+ "grad_norm": 0.480194091796875,
+ "learning_rate": 0.00011197361773832525,
+ "loss": 0.8132,
+ "step": 1384
+ },
+ {
+ "epoch": 1.3853896408364852,
+ "grad_norm": 0.5409587025642395,
+ "learning_rate": 0.00011186954073399387,
+ "loss": 1.0724,
+ "step": 1385
+ },
+ {
+ "epoch": 1.386389922165609,
+ "grad_norm": 0.5776751041412354,
+ "learning_rate": 0.00011176545068718385,
+ "loss": 0.9577,
+ "step": 1386
+ },
+ {
+ "epoch": 1.387390203494733,
+ "grad_norm": 0.4478171765804291,
+ "learning_rate": 0.0001116613477122713,
+ "loss": 0.7698,
+ "step": 1387
+ },
+ {
+ "epoch": 1.3883904848238566,
+ "grad_norm": 0.5580281615257263,
+ "learning_rate": 0.00011155723192364658,
+ "loss": 1.0065,
+ "step": 1388
+ },
+ {
+ "epoch": 1.3893907661529805,
+ "grad_norm": 0.5318020582199097,
+ "learning_rate": 0.00011145310343571411,
+ "loss": 0.9155,
+ "step": 1389
+ },
+ {
+ "epoch": 1.3903910474821044,
+ "grad_norm": 0.45960649847984314,
+ "learning_rate": 0.00011134896236289224,
+ "loss": 0.848,
+ "step": 1390
+ },
+ {
+ "epoch": 1.391391328811228,
+ "grad_norm": 0.49986693263053894,
+ "learning_rate": 0.0001112448088196132,
+ "loss": 1.0222,
+ "step": 1391
+ },
+ {
+ "epoch": 1.392391610140352,
+ "grad_norm": 0.6470636129379272,
+ "learning_rate": 0.00011114064292032282,
+ "loss": 0.8976,
+ "step": 1392
+ },
+ {
+ "epoch": 1.3933918914694758,
+ "grad_norm": 0.49885210394859314,
+ "learning_rate": 0.0001110364647794807,
+ "loss": 0.8872,
+ "step": 1393
+ },
+ {
+ "epoch": 1.3943921727985997,
+ "grad_norm": 0.48183003067970276,
+ "learning_rate": 0.00011093227451155974,
+ "loss": 0.7506,
+ "step": 1394
+ },
+ {
+ "epoch": 1.3953924541277234,
+ "grad_norm": 0.47776031494140625,
+ "learning_rate": 0.0001108280722310462,
+ "loss": 0.9945,
+ "step": 1395
+ },
+ {
+ "epoch": 1.3963927354568473,
+ "grad_norm": 0.5032552480697632,
+ "learning_rate": 0.0001107238580524395,
+ "loss": 0.9844,
+ "step": 1396
+ },
+ {
+ "epoch": 1.397393016785971,
+ "grad_norm": 0.5641827583312988,
+ "learning_rate": 0.00011061963209025223,
+ "loss": 0.9862,
+ "step": 1397
+ },
+ {
+ "epoch": 1.3983932981150948,
+ "grad_norm": 0.45950955152511597,
+ "learning_rate": 0.00011051539445900983,
+ "loss": 0.9878,
+ "step": 1398
+ },
+ {
+ "epoch": 1.3993935794442187,
+ "grad_norm": 0.48625022172927856,
+ "learning_rate": 0.00011041114527325065,
+ "loss": 0.9446,
+ "step": 1399
+ },
+ {
+ "epoch": 1.4003938607733426,
+ "grad_norm": 0.5851911902427673,
+ "learning_rate": 0.00011030688464752566,
+ "loss": 1.1538,
+ "step": 1400
+ },
+ {
+ "epoch": 1.4013941421024663,
+ "grad_norm": 0.45012837648391724,
+ "learning_rate": 0.00011020261269639842,
+ "loss": 0.8871,
+ "step": 1401
+ },
+ {
+ "epoch": 1.4023944234315902,
+ "grad_norm": 0.4794975221157074,
+ "learning_rate": 0.000110098329534445,
+ "loss": 0.912,
+ "step": 1402
+ },
+ {
+ "epoch": 1.4033947047607138,
+ "grad_norm": 0.5397909879684448,
+ "learning_rate": 0.00010999403527625367,
+ "loss": 1.015,
+ "step": 1403
+ },
+ {
+ "epoch": 1.4043949860898377,
+ "grad_norm": 0.5413039922714233,
+ "learning_rate": 0.00010988973003642499,
+ "loss": 1.0111,
+ "step": 1404
+ },
+ {
+ "epoch": 1.4053952674189616,
+ "grad_norm": 0.48752084374427795,
+ "learning_rate": 0.00010978541392957156,
+ "loss": 0.8649,
+ "step": 1405
+ },
+ {
+ "epoch": 1.4063955487480855,
+ "grad_norm": 0.5576539635658264,
+ "learning_rate": 0.00010968108707031792,
+ "loss": 0.8334,
+ "step": 1406
+ },
+ {
+ "epoch": 1.4073958300772091,
+ "grad_norm": 0.5292769074440002,
+ "learning_rate": 0.00010957674957330042,
+ "loss": 1.0312,
+ "step": 1407
+ },
+ {
+ "epoch": 1.408396111406333,
+ "grad_norm": 0.5971432328224182,
+ "learning_rate": 0.00010947240155316707,
+ "loss": 0.9367,
+ "step": 1408
+ },
+ {
+ "epoch": 1.409396392735457,
+ "grad_norm": 0.5620018839836121,
+ "learning_rate": 0.00010936804312457749,
+ "loss": 0.9493,
+ "step": 1409
+ },
+ {
+ "epoch": 1.4103966740645806,
+ "grad_norm": 0.456496000289917,
+ "learning_rate": 0.00010926367440220276,
+ "loss": 0.8532,
+ "step": 1410
+ },
+ {
+ "epoch": 1.4113969553937045,
+ "grad_norm": 0.47393882274627686,
+ "learning_rate": 0.00010915929550072517,
+ "loss": 0.8073,
+ "step": 1411
+ },
+ {
+ "epoch": 1.4123972367228284,
+ "grad_norm": 0.5321446061134338,
+ "learning_rate": 0.00010905490653483827,
+ "loss": 1.1076,
+ "step": 1412
+ },
+ {
+ "epoch": 1.4133975180519522,
+ "grad_norm": 0.4768468141555786,
+ "learning_rate": 0.00010895050761924668,
+ "loss": 0.9466,
+ "step": 1413
+ },
+ {
+ "epoch": 1.414397799381076,
+ "grad_norm": 0.5629300475120544,
+ "learning_rate": 0.00010884609886866588,
+ "loss": 1.0541,
+ "step": 1414
+ },
+ {
+ "epoch": 1.4153980807101998,
+ "grad_norm": 0.45907631516456604,
+ "learning_rate": 0.00010874168039782227,
+ "loss": 0.9156,
+ "step": 1415
+ },
+ {
+ "epoch": 1.4163983620393235,
+ "grad_norm": 0.5152727961540222,
+ "learning_rate": 0.00010863725232145286,
+ "loss": 1.0495,
+ "step": 1416
+ },
+ {
+ "epoch": 1.4173986433684473,
+ "grad_norm": 0.511647641658783,
+ "learning_rate": 0.00010853281475430517,
+ "loss": 0.7327,
+ "step": 1417
+ },
+ {
+ "epoch": 1.4183989246975712,
+ "grad_norm": 0.6430179476737976,
+ "learning_rate": 0.0001084283678111372,
+ "loss": 0.9831,
+ "step": 1418
+ },
+ {
+ "epoch": 1.4193992060266951,
+ "grad_norm": 0.5592547059059143,
+ "learning_rate": 0.00010832391160671729,
+ "loss": 0.9462,
+ "step": 1419
+ },
+ {
+ "epoch": 1.4203994873558188,
+ "grad_norm": 0.5079266428947449,
+ "learning_rate": 0.00010821944625582392,
+ "loss": 1.0473,
+ "step": 1420
+ },
+ {
+ "epoch": 1.4213997686849427,
+ "grad_norm": 0.5006073713302612,
+ "learning_rate": 0.00010811497187324555,
+ "loss": 0.8077,
+ "step": 1421
+ },
+ {
+ "epoch": 1.4224000500140663,
+ "grad_norm": 0.47260841727256775,
+ "learning_rate": 0.00010801048857378071,
+ "loss": 0.8069,
+ "step": 1422
+ },
+ {
+ "epoch": 1.4234003313431902,
+ "grad_norm": 0.5051037669181824,
+ "learning_rate": 0.00010790599647223763,
+ "loss": 1.0241,
+ "step": 1423
+ },
+ {
+ "epoch": 1.424400612672314,
+ "grad_norm": 0.5116690397262573,
+ "learning_rate": 0.0001078014956834342,
+ "loss": 1.0377,
+ "step": 1424
+ },
+ {
+ "epoch": 1.425400894001438,
+ "grad_norm": 0.48974907398223877,
+ "learning_rate": 0.00010769698632219794,
+ "loss": 1.0578,
+ "step": 1425
+ },
+ {
+ "epoch": 1.4264011753305617,
+ "grad_norm": 0.5071999430656433,
+ "learning_rate": 0.00010759246850336572,
+ "loss": 0.9072,
+ "step": 1426
+ },
+ {
+ "epoch": 1.4274014566596855,
+ "grad_norm": 0.6418463587760925,
+ "learning_rate": 0.0001074879423417837,
+ "loss": 1.1195,
+ "step": 1427
+ },
+ {
+ "epoch": 1.4284017379888094,
+ "grad_norm": 0.4854032099246979,
+ "learning_rate": 0.00010738340795230721,
+ "loss": 1.0776,
+ "step": 1428
+ },
+ {
+ "epoch": 1.429402019317933,
+ "grad_norm": 0.5330777764320374,
+ "learning_rate": 0.00010727886544980068,
+ "loss": 1.0851,
+ "step": 1429
+ },
+ {
+ "epoch": 1.430402300647057,
+ "grad_norm": 0.5281643271446228,
+ "learning_rate": 0.00010717431494913741,
+ "loss": 0.8663,
+ "step": 1430
+ },
+ {
+ "epoch": 1.4314025819761809,
+ "grad_norm": 0.47898662090301514,
+ "learning_rate": 0.00010706975656519946,
+ "loss": 0.9926,
+ "step": 1431
+ },
+ {
+ "epoch": 1.4324028633053048,
+ "grad_norm": 0.43927934765815735,
+ "learning_rate": 0.00010696519041287765,
+ "loss": 0.8698,
+ "step": 1432
+ },
+ {
+ "epoch": 1.4334031446344284,
+ "grad_norm": 0.5207253694534302,
+ "learning_rate": 0.0001068606166070712,
+ "loss": 0.9795,
+ "step": 1433
+ },
+ {
+ "epoch": 1.4344034259635523,
+ "grad_norm": 0.5264511704444885,
+ "learning_rate": 0.00010675603526268785,
+ "loss": 0.9593,
+ "step": 1434
+ },
+ {
+ "epoch": 1.435403707292676,
+ "grad_norm": 0.5435792803764343,
+ "learning_rate": 0.00010665144649464356,
+ "loss": 0.9436,
+ "step": 1435
+ },
+ {
+ "epoch": 1.4364039886217999,
+ "grad_norm": 0.5383104681968689,
+ "learning_rate": 0.00010654685041786249,
+ "loss": 0.9569,
+ "step": 1436
+ },
+ {
+ "epoch": 1.4374042699509237,
+ "grad_norm": 0.48762592673301697,
+ "learning_rate": 0.00010644224714727681,
+ "loss": 0.9235,
+ "step": 1437
+ },
+ {
+ "epoch": 1.4384045512800476,
+ "grad_norm": 0.4815019965171814,
+ "learning_rate": 0.0001063376367978266,
+ "loss": 0.8241,
+ "step": 1438
+ },
+ {
+ "epoch": 1.4394048326091713,
+ "grad_norm": 0.4944337010383606,
+ "learning_rate": 0.00010623301948445971,
+ "loss": 0.9169,
+ "step": 1439
+ },
+ {
+ "epoch": 1.4404051139382952,
+ "grad_norm": 0.5658552646636963,
+ "learning_rate": 0.00010612839532213164,
+ "loss": 1.044,
+ "step": 1440
+ },
+ {
+ "epoch": 1.4414053952674188,
+ "grad_norm": 0.5688045620918274,
+ "learning_rate": 0.00010602376442580544,
+ "loss": 0.9684,
+ "step": 1441
+ },
+ {
+ "epoch": 1.4424056765965427,
+ "grad_norm": 0.5434709787368774,
+ "learning_rate": 0.00010591912691045152,
+ "loss": 0.8741,
+ "step": 1442
+ },
+ {
+ "epoch": 1.4434059579256666,
+ "grad_norm": 0.583562433719635,
+ "learning_rate": 0.00010581448289104758,
+ "loss": 1.1651,
+ "step": 1443
+ },
+ {
+ "epoch": 1.4444062392547905,
+ "grad_norm": 0.566363513469696,
+ "learning_rate": 0.00010570983248257853,
+ "loss": 1.0091,
+ "step": 1444
+ },
+ {
+ "epoch": 1.4454065205839142,
+ "grad_norm": 0.527039647102356,
+ "learning_rate": 0.00010560517580003617,
+ "loss": 1.0666,
+ "step": 1445
+ },
+ {
+ "epoch": 1.446406801913038,
+ "grad_norm": 0.46389803290367126,
+ "learning_rate": 0.00010550051295841931,
+ "loss": 0.9344,
+ "step": 1446
+ },
+ {
+ "epoch": 1.447407083242162,
+ "grad_norm": 0.6291074752807617,
+ "learning_rate": 0.00010539584407273349,
+ "loss": 1.0388,
+ "step": 1447
+ },
+ {
+ "epoch": 1.4484073645712856,
+ "grad_norm": 0.5249356031417847,
+ "learning_rate": 0.00010529116925799085,
+ "loss": 0.97,
+ "step": 1448
+ },
+ {
+ "epoch": 1.4494076459004095,
+ "grad_norm": 0.4662008583545685,
+ "learning_rate": 0.00010518648862921012,
+ "loss": 0.8385,
+ "step": 1449
+ },
+ {
+ "epoch": 1.4504079272295334,
+ "grad_norm": 0.5730600953102112,
+ "learning_rate": 0.00010508180230141635,
+ "loss": 0.8747,
+ "step": 1450
+ },
+ {
+ "epoch": 1.451408208558657,
+ "grad_norm": 0.48082512617111206,
+ "learning_rate": 0.00010497711038964086,
+ "loss": 0.8624,
+ "step": 1451
+ },
+ {
+ "epoch": 1.452408489887781,
+ "grad_norm": 0.48900333046913147,
+ "learning_rate": 0.0001048724130089212,
+ "loss": 0.7826,
+ "step": 1452
+ },
+ {
+ "epoch": 1.4534087712169048,
+ "grad_norm": 0.4998112618923187,
+ "learning_rate": 0.00010476771027430086,
+ "loss": 0.8687,
+ "step": 1453
+ },
+ {
+ "epoch": 1.4544090525460285,
+ "grad_norm": 0.4872112572193146,
+ "learning_rate": 0.00010466300230082911,
+ "loss": 0.9185,
+ "step": 1454
+ },
+ {
+ "epoch": 1.4554093338751524,
+ "grad_norm": 0.5405575633049011,
+ "learning_rate": 0.00010455828920356115,
+ "loss": 0.9601,
+ "step": 1455
+ },
+ {
+ "epoch": 1.4564096152042763,
+ "grad_norm": 0.4496804475784302,
+ "learning_rate": 0.00010445357109755771,
+ "loss": 0.8606,
+ "step": 1456
+ },
+ {
+ "epoch": 1.4574098965334001,
+ "grad_norm": 0.49340635538101196,
+ "learning_rate": 0.00010434884809788508,
+ "loss": 1.1009,
+ "step": 1457
+ },
+ {
+ "epoch": 1.4584101778625238,
+ "grad_norm": 0.4692990481853485,
+ "learning_rate": 0.00010424412031961484,
+ "loss": 0.8011,
+ "step": 1458
+ },
+ {
+ "epoch": 1.4594104591916477,
+ "grad_norm": 0.5027800798416138,
+ "learning_rate": 0.00010413938787782394,
+ "loss": 0.8827,
+ "step": 1459
+ },
+ {
+ "epoch": 1.4604107405207714,
+ "grad_norm": 0.6764587163925171,
+ "learning_rate": 0.00010403465088759437,
+ "loss": 0.8513,
+ "step": 1460
+ },
+ {
+ "epoch": 1.4614110218498952,
+ "grad_norm": 0.558620035648346,
+ "learning_rate": 0.00010392990946401313,
+ "loss": 0.9881,
+ "step": 1461
+ },
+ {
+ "epoch": 1.4624113031790191,
+ "grad_norm": 0.603817343711853,
+ "learning_rate": 0.00010382516372217215,
+ "loss": 0.9869,
+ "step": 1462
+ },
+ {
+ "epoch": 1.463411584508143,
+ "grad_norm": 0.4486953020095825,
+ "learning_rate": 0.000103720413777168,
+ "loss": 0.8933,
+ "step": 1463
+ },
+ {
+ "epoch": 1.4644118658372667,
+ "grad_norm": 0.5756564736366272,
+ "learning_rate": 0.00010361565974410192,
+ "loss": 0.9974,
+ "step": 1464
+ },
+ {
+ "epoch": 1.4654121471663906,
+ "grad_norm": 0.4386444389820099,
+ "learning_rate": 0.00010351090173807969,
+ "loss": 0.8577,
+ "step": 1465
+ },
+ {
+ "epoch": 1.4664124284955142,
+ "grad_norm": 0.5308933258056641,
+ "learning_rate": 0.00010340613987421137,
+ "loss": 1.0539,
+ "step": 1466
+ },
+ {
+ "epoch": 1.4674127098246381,
+ "grad_norm": 0.6070798635482788,
+ "learning_rate": 0.00010330137426761135,
+ "loss": 0.9111,
+ "step": 1467
+ },
+ {
+ "epoch": 1.468412991153762,
+ "grad_norm": 0.5870214700698853,
+ "learning_rate": 0.00010319660503339808,
+ "loss": 0.9958,
+ "step": 1468
+ },
+ {
+ "epoch": 1.469413272482886,
+ "grad_norm": 0.5014438629150391,
+ "learning_rate": 0.00010309183228669397,
+ "loss": 0.987,
+ "step": 1469
+ },
+ {
+ "epoch": 1.4704135538120096,
+ "grad_norm": 0.47051525115966797,
+ "learning_rate": 0.00010298705614262532,
+ "loss": 1.0899,
+ "step": 1470
+ },
+ {
+ "epoch": 1.4714138351411334,
+ "grad_norm": 0.5500984787940979,
+ "learning_rate": 0.0001028822767163222,
+ "loss": 0.8882,
+ "step": 1471
+ },
+ {
+ "epoch": 1.4724141164702573,
+ "grad_norm": 0.4973205626010895,
+ "learning_rate": 0.00010277749412291824,
+ "loss": 0.9374,
+ "step": 1472
+ },
+ {
+ "epoch": 1.473414397799381,
+ "grad_norm": 0.4927331209182739,
+ "learning_rate": 0.00010267270847755048,
+ "loss": 0.9608,
+ "step": 1473
+ },
+ {
+ "epoch": 1.4744146791285049,
+ "grad_norm": 0.5539640188217163,
+ "learning_rate": 0.00010256791989535952,
+ "loss": 0.9339,
+ "step": 1474
+ },
+ {
+ "epoch": 1.4754149604576288,
+ "grad_norm": 0.48375800251960754,
+ "learning_rate": 0.00010246312849148899,
+ "loss": 0.8778,
+ "step": 1475
+ },
+ {
+ "epoch": 1.4764152417867527,
+ "grad_norm": 0.522544264793396,
+ "learning_rate": 0.00010235833438108571,
+ "loss": 0.9633,
+ "step": 1476
+ },
+ {
+ "epoch": 1.4774155231158763,
+ "grad_norm": 0.5747688412666321,
+ "learning_rate": 0.00010225353767929944,
+ "loss": 1.0206,
+ "step": 1477
+ },
+ {
+ "epoch": 1.4784158044450002,
+ "grad_norm": 0.4539598226547241,
+ "learning_rate": 0.00010214873850128282,
+ "loss": 0.7895,
+ "step": 1478
+ },
+ {
+ "epoch": 1.4794160857741239,
+ "grad_norm": 0.4290696978569031,
+ "learning_rate": 0.00010204393696219117,
+ "loss": 0.8718,
+ "step": 1479
+ },
+ {
+ "epoch": 1.4804163671032478,
+ "grad_norm": 0.43560928106307983,
+ "learning_rate": 0.00010193913317718244,
+ "loss": 0.8839,
+ "step": 1480
+ },
+ {
+ "epoch": 1.4814166484323716,
+ "grad_norm": 0.4937680661678314,
+ "learning_rate": 0.00010183432726141706,
+ "loss": 0.9615,
+ "step": 1481
+ },
+ {
+ "epoch": 1.4824169297614955,
+ "grad_norm": 0.5631589889526367,
+ "learning_rate": 0.00010172951933005775,
+ "loss": 1.0691,
+ "step": 1482
+ },
+ {
+ "epoch": 1.4834172110906192,
+ "grad_norm": 0.5049973726272583,
+ "learning_rate": 0.00010162470949826948,
+ "loss": 0.9107,
+ "step": 1483
+ },
+ {
+ "epoch": 1.484417492419743,
+ "grad_norm": 0.5362145304679871,
+ "learning_rate": 0.0001015198978812193,
+ "loss": 0.9762,
+ "step": 1484
+ },
+ {
+ "epoch": 1.4854177737488667,
+ "grad_norm": 0.4824192225933075,
+ "learning_rate": 0.00010141508459407623,
+ "loss": 0.8844,
+ "step": 1485
+ },
+ {
+ "epoch": 1.4864180550779906,
+ "grad_norm": 0.5116665959358215,
+ "learning_rate": 0.0001013102697520111,
+ "loss": 0.9461,
+ "step": 1486
+ },
+ {
+ "epoch": 1.4874183364071145,
+ "grad_norm": 0.5244630575180054,
+ "learning_rate": 0.00010120545347019647,
+ "loss": 1.0286,
+ "step": 1487
+ },
+ {
+ "epoch": 1.4884186177362384,
+ "grad_norm": 0.5252584218978882,
+ "learning_rate": 0.00010110063586380646,
+ "loss": 1.1083,
+ "step": 1488
+ },
+ {
+ "epoch": 1.489418899065362,
+ "grad_norm": 0.4909230172634125,
+ "learning_rate": 0.00010099581704801673,
+ "loss": 0.9338,
+ "step": 1489
+ },
+ {
+ "epoch": 1.490419180394486,
+ "grad_norm": 0.5618056654930115,
+ "learning_rate": 0.00010089099713800414,
+ "loss": 1.0513,
+ "step": 1490
+ },
+ {
+ "epoch": 1.4914194617236098,
+ "grad_norm": 0.48737892508506775,
+ "learning_rate": 0.00010078617624894684,
+ "loss": 0.8669,
+ "step": 1491
+ },
+ {
+ "epoch": 1.4924197430527335,
+ "grad_norm": 0.411451131105423,
+ "learning_rate": 0.000100681354496024,
+ "loss": 0.881,
+ "step": 1492
+ },
+ {
+ "epoch": 1.4934200243818574,
+ "grad_norm": 0.5821709632873535,
+ "learning_rate": 0.00010057653199441581,
+ "loss": 0.9359,
+ "step": 1493
+ },
+ {
+ "epoch": 1.4944203057109813,
+ "grad_norm": 0.4621860086917877,
+ "learning_rate": 0.00010047170885930324,
+ "loss": 0.8121,
+ "step": 1494
+ },
+ {
+ "epoch": 1.4954205870401052,
+ "grad_norm": 0.4658668339252472,
+ "learning_rate": 0.00010036688520586788,
+ "loss": 0.9806,
+ "step": 1495
+ },
+ {
+ "epoch": 1.4964208683692288,
+ "grad_norm": 0.49816030263900757,
+ "learning_rate": 0.00010026206114929209,
+ "loss": 0.9124,
+ "step": 1496
+ },
+ {
+ "epoch": 1.4974211496983527,
+ "grad_norm": 0.5228123068809509,
+ "learning_rate": 0.00010015723680475846,
+ "loss": 1.0132,
+ "step": 1497
+ },
+ {
+ "epoch": 1.4984214310274764,
+ "grad_norm": 0.4727514982223511,
+ "learning_rate": 0.00010005241228745004,
+ "loss": 0.8418,
+ "step": 1498
+ },
+ {
+ "epoch": 1.4994217123566003,
+ "grad_norm": 0.528904914855957,
+ "learning_rate": 9.994758771254997e-05,
+ "loss": 0.9702,
+ "step": 1499
+ },
+ {
+ "epoch": 1.5004219936857242,
+ "grad_norm": 0.5090524554252625,
+ "learning_rate": 9.984276319524154e-05,
+ "loss": 0.9927,
+ "step": 1500
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 2997,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 9.717711466227302e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1500/training_args.bin b/checkpoint-1500/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f5db54c03213459099040d14f85829b6aeb0666
--- /dev/null
+++ b/checkpoint-1500/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb43d78443117126e44061cb7a0c1f9a5c40f27f7bf1d5cd0232587a4334407
+size 5304
diff --git a/checkpoint-2000/config.json b/checkpoint-2000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..06ec1df58f28234ccce22a5325e108ece94f0078
--- /dev/null
+++ b/checkpoint-2000/config.json
@@ -0,0 +1,34 @@
+{
+ "_name_or_path": "facebook/nllb-200-3.3B",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "M2M100ForConditionalGeneration"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 0,
+ "d_model": 2048,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 8192,
+ "decoder_layerdrop": 0,
+ "decoder_layers": 24,
+ "decoder_start_token_id": 2,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 8192,
+ "encoder_layerdrop": 0,
+ "encoder_layers": 24,
+ "eos_token_id": 2,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 200,
+ "max_position_embeddings": 1024,
+ "model_type": "m2m_100",
+ "num_hidden_layers": 24,
+ "pad_token_id": 1,
+ "scale_embedding": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.43.2",
+ "use_cache": true,
+ "vocab_size": 256206
+}
diff --git a/checkpoint-2000/generation_config.json b/checkpoint-2000/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..402a1a43d1af8c080466b8139184b4e5b7f3f47c
--- /dev/null
+++ b/checkpoint-2000/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "decoder_start_token_id": 2,
+ "eos_token_id": 2,
+ "max_length": 200,
+ "pad_token_id": 1,
+ "transformers_version": "4.43.2"
+}
diff --git a/checkpoint-2000/model-00001-of-00003.safetensors b/checkpoint-2000/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f73571b868bc5c799749fb358215e221353af8f5
--- /dev/null
+++ b/checkpoint-2000/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a625f6f3b2f8893f71aa6dcd7e0fc4340c09252efc0e740fd48d0eaffcb816bd
+size 4986088344
diff --git a/checkpoint-2000/model-00002-of-00003.safetensors b/checkpoint-2000/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3f0d8744627dbbbe8b8f16cf1c27d5513d464114
--- /dev/null
+++ b/checkpoint-2000/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:557b61c559358454ed196c640f4a0d45ba06cc053a20ee05c2877bf9ecd06465
+size 4985688360
diff --git a/checkpoint-2000/model-00003-of-00003.safetensors b/checkpoint-2000/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..61c406a3ad8fefab98e758846a91666ff45fbc56
--- /dev/null
+++ b/checkpoint-2000/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4659500777218e1b14f90ce3e76468cfe931135e6e090800e202803b138961b
+size 3407796744
diff --git a/checkpoint-2000/model.safetensors.index.json b/checkpoint-2000/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..66f5db79b23230955de24502c00adc6525edbdfc
--- /dev/null
+++ b/checkpoint-2000/model.safetensors.index.json
@@ -0,0 +1,1020 @@
+{
+ "metadata": {
+ "total_size": 13379452928
+ },
+ "weight_map": {
+ "model.decoder.layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.shared.weight": "model-00001-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-2000/optimizer.pt b/checkpoint-2000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6cf67d27758e3acc5a8495fda715feb0d6c46bf3
--- /dev/null
+++ b/checkpoint-2000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef84b1ed891b67ba55a1f977bf286cc8244f02aa7867412cce968465ff2e7155
+size 16695613
diff --git a/checkpoint-2000/rng_state.pth b/checkpoint-2000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b37b073b0cb77aaab8f561d7243e9e4f6701b01c
--- /dev/null
+++ b/checkpoint-2000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21899bd22f2a224a53f41656db06af1ff9e25bab84684440b6e87e56ee498088
+size 14244
diff --git a/checkpoint-2000/scheduler.pt b/checkpoint-2000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b51f382f38c14543c5d9f760498ad98ccd2a5791
--- /dev/null
+++ b/checkpoint-2000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2c02e1aee6f43146c60a10904b177fae4453e7ff3b69f22b17189b83f41a87
+size 1064
diff --git a/checkpoint-2000/sentencepiece.bpe.model b/checkpoint-2000/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..dc2262d3e1d375b235eb71c24119c8e73f85d4ad
--- /dev/null
+++ b/checkpoint-2000/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
+size 4852054
diff --git a/checkpoint-2000/special_tokens_map.json b/checkpoint-2000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..770c6f4e25faf27bbc3878b806f2ecfb88c5169e
--- /dev/null
+++ b/checkpoint-2000/special_tokens_map.json
@@ -0,0 +1,255 @@
+{
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-2000/tokenizer.json b/checkpoint-2000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..98050e98b98364c06d83b3f41864076220cb8408
--- /dev/null
+++ b/checkpoint-2000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b39b25b0763a1dd69dec54081fafcf10770d9f2538a3bd975a0c4be6d60a9c2
+size 17331294
diff --git a/checkpoint-2000/tokenizer_config.json b/checkpoint-2000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f1424d3657c008568198b44be241646482e7e9f2
--- /dev/null
+++ b/checkpoint-2000/tokenizer_config.json
@@ -0,0 +1,1878 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256001": {
+ "content": "ace_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256002": {
+ "content": "ace_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256003": {
+ "content": "acm_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256004": {
+ "content": "acq_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256005": {
+ "content": "aeb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256006": {
+ "content": "afr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256007": {
+ "content": "ajp_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256008": {
+ "content": "aka_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256009": {
+ "content": "amh_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256010": {
+ "content": "apc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256011": {
+ "content": "arb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256012": {
+ "content": "ars_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256013": {
+ "content": "ary_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256014": {
+ "content": "arz_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256015": {
+ "content": "asm_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256016": {
+ "content": "ast_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256017": {
+ "content": "awa_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256018": {
+ "content": "ayr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256019": {
+ "content": "azb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256020": {
+ "content": "azj_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256021": {
+ "content": "bak_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256022": {
+ "content": "bam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256023": {
+ "content": "ban_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256024": {
+ "content": "bel_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256025": {
+ "content": "bem_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256026": {
+ "content": "ben_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256027": {
+ "content": "bho_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256028": {
+ "content": "bjn_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256029": {
+ "content": "bjn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256030": {
+ "content": "bod_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256031": {
+ "content": "bos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256032": {
+ "content": "bug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256033": {
+ "content": "bul_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256034": {
+ "content": "cat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256035": {
+ "content": "ceb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256036": {
+ "content": "ces_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256037": {
+ "content": "cjk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256038": {
+ "content": "ckb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256039": {
+ "content": "crh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256040": {
+ "content": "cym_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256041": {
+ "content": "dan_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256042": {
+ "content": "deu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256043": {
+ "content": "dik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256044": {
+ "content": "dyu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256045": {
+ "content": "dzo_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256046": {
+ "content": "ell_Grek",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256047": {
+ "content": "eng_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256048": {
+ "content": "epo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256049": {
+ "content": "est_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256050": {
+ "content": "eus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256051": {
+ "content": "ewe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256052": {
+ "content": "fao_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256053": {
+ "content": "pes_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256054": {
+ "content": "fij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256055": {
+ "content": "fin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256056": {
+ "content": "fon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256057": {
+ "content": "fra_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256058": {
+ "content": "fur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256059": {
+ "content": "fuv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256060": {
+ "content": "gla_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256061": {
+ "content": "gle_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256062": {
+ "content": "glg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256063": {
+ "content": "grn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256064": {
+ "content": "guj_Gujr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256065": {
+ "content": "hat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256066": {
+ "content": "hau_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256067": {
+ "content": "heb_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256068": {
+ "content": "hin_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256069": {
+ "content": "hne_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256070": {
+ "content": "hrv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256071": {
+ "content": "hun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256072": {
+ "content": "hye_Armn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256073": {
+ "content": "ibo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256074": {
+ "content": "ilo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256075": {
+ "content": "ind_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256076": {
+ "content": "isl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256077": {
+ "content": "ita_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256078": {
+ "content": "jav_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256079": {
+ "content": "jpn_Jpan",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256080": {
+ "content": "kab_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256081": {
+ "content": "kac_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256082": {
+ "content": "kam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256083": {
+ "content": "kan_Knda",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256084": {
+ "content": "kas_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256085": {
+ "content": "kas_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256086": {
+ "content": "kat_Geor",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256087": {
+ "content": "knc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256088": {
+ "content": "knc_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256089": {
+ "content": "kaz_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256090": {
+ "content": "kbp_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256091": {
+ "content": "kea_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256092": {
+ "content": "khm_Khmr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256093": {
+ "content": "kik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256094": {
+ "content": "kin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256095": {
+ "content": "kir_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256096": {
+ "content": "kmb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256097": {
+ "content": "kon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256098": {
+ "content": "kor_Hang",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256099": {
+ "content": "kmr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256100": {
+ "content": "lao_Laoo",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256101": {
+ "content": "lvs_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256102": {
+ "content": "lij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256103": {
+ "content": "lim_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256104": {
+ "content": "lin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256105": {
+ "content": "lit_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256106": {
+ "content": "lmo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256107": {
+ "content": "ltg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256108": {
+ "content": "ltz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256109": {
+ "content": "lua_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256110": {
+ "content": "lug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256111": {
+ "content": "luo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256112": {
+ "content": "lus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256113": {
+ "content": "mag_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256114": {
+ "content": "mai_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256115": {
+ "content": "mal_Mlym",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256116": {
+ "content": "mar_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256117": {
+ "content": "min_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256118": {
+ "content": "mkd_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256119": {
+ "content": "plt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256120": {
+ "content": "mlt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256121": {
+ "content": "mni_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256122": {
+ "content": "khk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256123": {
+ "content": "mos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256124": {
+ "content": "mri_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256125": {
+ "content": "zsm_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256126": {
+ "content": "mya_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256127": {
+ "content": "nld_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256128": {
+ "content": "nno_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256129": {
+ "content": "nob_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256130": {
+ "content": "npi_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256131": {
+ "content": "nso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256132": {
+ "content": "nus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256133": {
+ "content": "nya_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256134": {
+ "content": "oci_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256135": {
+ "content": "gaz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256136": {
+ "content": "ory_Orya",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256137": {
+ "content": "pag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256138": {
+ "content": "pan_Guru",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256139": {
+ "content": "pap_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256140": {
+ "content": "pol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256141": {
+ "content": "por_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256142": {
+ "content": "prs_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256143": {
+ "content": "pbt_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256144": {
+ "content": "quy_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256145": {
+ "content": "ron_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256146": {
+ "content": "run_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256147": {
+ "content": "rus_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256148": {
+ "content": "sag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256149": {
+ "content": "san_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256150": {
+ "content": "sat_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256151": {
+ "content": "scn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256152": {
+ "content": "shn_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256153": {
+ "content": "sin_Sinh",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256154": {
+ "content": "slk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256155": {
+ "content": "slv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256156": {
+ "content": "smo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256157": {
+ "content": "sna_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256158": {
+ "content": "snd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256159": {
+ "content": "som_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256160": {
+ "content": "sot_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256161": {
+ "content": "spa_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256162": {
+ "content": "als_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256163": {
+ "content": "srd_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256164": {
+ "content": "srp_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256165": {
+ "content": "ssw_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256166": {
+ "content": "sun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256167": {
+ "content": "swe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256168": {
+ "content": "swh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256169": {
+ "content": "szl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256170": {
+ "content": "tam_Taml",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256171": {
+ "content": "tat_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256172": {
+ "content": "tel_Telu",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256173": {
+ "content": "tgk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256174": {
+ "content": "tgl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256175": {
+ "content": "tha_Thai",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256176": {
+ "content": "tir_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256177": {
+ "content": "taq_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256178": {
+ "content": "taq_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256179": {
+ "content": "tpi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256180": {
+ "content": "tsn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256181": {
+ "content": "tso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256182": {
+ "content": "tuk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256183": {
+ "content": "tum_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256184": {
+ "content": "tur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256185": {
+ "content": "twi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256186": {
+ "content": "tzm_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256187": {
+ "content": "uig_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256188": {
+ "content": "ukr_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256189": {
+ "content": "umb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256190": {
+ "content": "urd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256191": {
+ "content": "uzn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256192": {
+ "content": "vec_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256193": {
+ "content": "vie_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256194": {
+ "content": "war_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256195": {
+ "content": "wol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256196": {
+ "content": "xho_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256197": {
+ "content": "ydd_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256198": {
+ "content": "yor_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256199": {
+ "content": "yue_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256200": {
+ "content": "zho_Hans",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256201": {
+ "content": "zho_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256202": {
+ "content": "zul_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256203": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "legacy_behaviour": false,
+ "mask_token": "",
+ "model_max_length": 1024,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "src_lang": "eng_Latn",
+ "tgt_lang": null,
+ "tokenizer_class": "NllbTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-2000/trainer_state.json b/checkpoint-2000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..73893d0825da6635b4f2f35bbf05f1c1bd735cc1
--- /dev/null
+++ b/checkpoint-2000/trainer_state.json
@@ -0,0 +1,14033 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.0005626582476324,
+ "eval_steps": 500,
+ "global_step": 2000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001000281329123816,
+ "grad_norm": 5.902005195617676,
+ "learning_rate": 0.0001999999450590425,
+ "loss": 3.1875,
+ "step": 1
+ },
+ {
+ "epoch": 0.002000562658247632,
+ "grad_norm": 3.2577760219573975,
+ "learning_rate": 0.00019999978023623033,
+ "loss": 2.3666,
+ "step": 2
+ },
+ {
+ "epoch": 0.003000843987371448,
+ "grad_norm": 5.3700995445251465,
+ "learning_rate": 0.0001999995055317446,
+ "loss": 2.8282,
+ "step": 3
+ },
+ {
+ "epoch": 0.004001125316495264,
+ "grad_norm": 2.1445534229278564,
+ "learning_rate": 0.00019999912094588717,
+ "loss": 2.2322,
+ "step": 4
+ },
+ {
+ "epoch": 0.005001406645619081,
+ "grad_norm": 1.5143821239471436,
+ "learning_rate": 0.00019999862647908064,
+ "loss": 2.1709,
+ "step": 5
+ },
+ {
+ "epoch": 0.006001687974742896,
+ "grad_norm": 2.0491714477539062,
+ "learning_rate": 0.00019999802213186834,
+ "loss": 2.2863,
+ "step": 6
+ },
+ {
+ "epoch": 0.007001969303866712,
+ "grad_norm": 1.2016857862472534,
+ "learning_rate": 0.0001999973079049143,
+ "loss": 1.5595,
+ "step": 7
+ },
+ {
+ "epoch": 0.008002250632990529,
+ "grad_norm": 1.3860406875610352,
+ "learning_rate": 0.00019999648379900338,
+ "loss": 1.7264,
+ "step": 8
+ },
+ {
+ "epoch": 0.009002531962114344,
+ "grad_norm": 1.0861930847167969,
+ "learning_rate": 0.0001999955498150411,
+ "loss": 2.0533,
+ "step": 9
+ },
+ {
+ "epoch": 0.010002813291238161,
+ "grad_norm": 2.233243703842163,
+ "learning_rate": 0.00019999450595405374,
+ "loss": 1.9378,
+ "step": 10
+ },
+ {
+ "epoch": 0.011003094620361977,
+ "grad_norm": 1.302808165550232,
+ "learning_rate": 0.0001999933522171883,
+ "loss": 1.9182,
+ "step": 11
+ },
+ {
+ "epoch": 0.012003375949485792,
+ "grad_norm": 0.8285257816314697,
+ "learning_rate": 0.00019999208860571255,
+ "loss": 1.9146,
+ "step": 12
+ },
+ {
+ "epoch": 0.01300365727860961,
+ "grad_norm": 1.2248319387435913,
+ "learning_rate": 0.00019999071512101496,
+ "loss": 1.7467,
+ "step": 13
+ },
+ {
+ "epoch": 0.014003938607733425,
+ "grad_norm": 0.8307135105133057,
+ "learning_rate": 0.00019998923176460474,
+ "loss": 1.6896,
+ "step": 14
+ },
+ {
+ "epoch": 0.01500421993685724,
+ "grad_norm": 1.1531301736831665,
+ "learning_rate": 0.00019998763853811184,
+ "loss": 1.7549,
+ "step": 15
+ },
+ {
+ "epoch": 0.016004501265981057,
+ "grad_norm": 1.0071958303451538,
+ "learning_rate": 0.00019998593544328692,
+ "loss": 1.903,
+ "step": 16
+ },
+ {
+ "epoch": 0.017004782595104875,
+ "grad_norm": 0.9111937284469604,
+ "learning_rate": 0.00019998412248200138,
+ "loss": 1.8372,
+ "step": 17
+ },
+ {
+ "epoch": 0.01800506392422869,
+ "grad_norm": 0.9943836331367493,
+ "learning_rate": 0.00019998219965624734,
+ "loss": 1.7304,
+ "step": 18
+ },
+ {
+ "epoch": 0.019005345253352506,
+ "grad_norm": 0.8139007687568665,
+ "learning_rate": 0.0001999801669681376,
+ "loss": 1.6932,
+ "step": 19
+ },
+ {
+ "epoch": 0.020005626582476323,
+ "grad_norm": 0.7991273999214172,
+ "learning_rate": 0.00019997802441990573,
+ "loss": 1.9596,
+ "step": 20
+ },
+ {
+ "epoch": 0.021005907911600136,
+ "grad_norm": 0.832266628742218,
+ "learning_rate": 0.00019997577201390606,
+ "loss": 1.7116,
+ "step": 21
+ },
+ {
+ "epoch": 0.022006189240723954,
+ "grad_norm": 0.8465655446052551,
+ "learning_rate": 0.00019997340975261353,
+ "loss": 1.7711,
+ "step": 22
+ },
+ {
+ "epoch": 0.02300647056984777,
+ "grad_norm": 1.032426118850708,
+ "learning_rate": 0.00019997093763862383,
+ "loss": 1.6746,
+ "step": 23
+ },
+ {
+ "epoch": 0.024006751898971584,
+ "grad_norm": 1.0036743879318237,
+ "learning_rate": 0.0001999683556746534,
+ "loss": 1.7274,
+ "step": 24
+ },
+ {
+ "epoch": 0.0250070332280954,
+ "grad_norm": 0.9491412043571472,
+ "learning_rate": 0.0001999656638635393,
+ "loss": 2.0302,
+ "step": 25
+ },
+ {
+ "epoch": 0.02600731455721922,
+ "grad_norm": 0.9477822184562683,
+ "learning_rate": 0.0001999628622082394,
+ "loss": 1.6107,
+ "step": 26
+ },
+ {
+ "epoch": 0.027007595886343033,
+ "grad_norm": 1.0687041282653809,
+ "learning_rate": 0.0001999599507118322,
+ "loss": 1.8225,
+ "step": 27
+ },
+ {
+ "epoch": 0.02800787721546685,
+ "grad_norm": 1.6572712659835815,
+ "learning_rate": 0.00019995692937751683,
+ "loss": 1.896,
+ "step": 28
+ },
+ {
+ "epoch": 0.029008158544590667,
+ "grad_norm": 1.013258695602417,
+ "learning_rate": 0.0001999537982086133,
+ "loss": 1.7847,
+ "step": 29
+ },
+ {
+ "epoch": 0.03000843987371448,
+ "grad_norm": 0.7584932446479797,
+ "learning_rate": 0.00019995055720856218,
+ "loss": 1.5841,
+ "step": 30
+ },
+ {
+ "epoch": 0.031008721202838298,
+ "grad_norm": 1.1543537378311157,
+ "learning_rate": 0.00019994720638092468,
+ "loss": 1.8362,
+ "step": 31
+ },
+ {
+ "epoch": 0.032009002531962115,
+ "grad_norm": 0.8389608860015869,
+ "learning_rate": 0.00019994374572938277,
+ "loss": 1.7913,
+ "step": 32
+ },
+ {
+ "epoch": 0.03300928386108593,
+ "grad_norm": 0.7582125663757324,
+ "learning_rate": 0.00019994017525773913,
+ "loss": 1.5406,
+ "step": 33
+ },
+ {
+ "epoch": 0.03400956519020975,
+ "grad_norm": 0.7866935133934021,
+ "learning_rate": 0.00019993649496991705,
+ "loss": 1.5363,
+ "step": 34
+ },
+ {
+ "epoch": 0.03500984651933356,
+ "grad_norm": 0.8007768988609314,
+ "learning_rate": 0.00019993270486996046,
+ "loss": 1.7597,
+ "step": 35
+ },
+ {
+ "epoch": 0.03601012784845738,
+ "grad_norm": 0.8109031319618225,
+ "learning_rate": 0.000199928804962034,
+ "loss": 1.5554,
+ "step": 36
+ },
+ {
+ "epoch": 0.037010409177581194,
+ "grad_norm": 0.7722628116607666,
+ "learning_rate": 0.00019992479525042303,
+ "loss": 1.6437,
+ "step": 37
+ },
+ {
+ "epoch": 0.03801069050670501,
+ "grad_norm": 0.7336480021476746,
+ "learning_rate": 0.00019992067573953342,
+ "loss": 1.7276,
+ "step": 38
+ },
+ {
+ "epoch": 0.03901097183582883,
+ "grad_norm": 0.6940280795097351,
+ "learning_rate": 0.0001999164464338918,
+ "loss": 1.846,
+ "step": 39
+ },
+ {
+ "epoch": 0.040011253164952645,
+ "grad_norm": 0.7079702615737915,
+ "learning_rate": 0.0001999121073381454,
+ "loss": 1.7017,
+ "step": 40
+ },
+ {
+ "epoch": 0.041011534494076456,
+ "grad_norm": 0.7438498139381409,
+ "learning_rate": 0.0001999076584570621,
+ "loss": 1.665,
+ "step": 41
+ },
+ {
+ "epoch": 0.04201181582320027,
+ "grad_norm": 0.6951525211334229,
+ "learning_rate": 0.00019990309979553045,
+ "loss": 1.588,
+ "step": 42
+ },
+ {
+ "epoch": 0.04301209715232409,
+ "grad_norm": 0.9398604035377502,
+ "learning_rate": 0.00019989843135855958,
+ "loss": 1.6513,
+ "step": 43
+ },
+ {
+ "epoch": 0.04401237848144791,
+ "grad_norm": 0.7384347319602966,
+ "learning_rate": 0.00019989365315127922,
+ "loss": 1.5975,
+ "step": 44
+ },
+ {
+ "epoch": 0.045012659810571724,
+ "grad_norm": 0.9856846332550049,
+ "learning_rate": 0.0001998887651789398,
+ "loss": 1.644,
+ "step": 45
+ },
+ {
+ "epoch": 0.04601294113969554,
+ "grad_norm": 0.7322820425033569,
+ "learning_rate": 0.0001998837674469123,
+ "loss": 1.5207,
+ "step": 46
+ },
+ {
+ "epoch": 0.04701322246881936,
+ "grad_norm": 0.8695257902145386,
+ "learning_rate": 0.00019987865996068833,
+ "loss": 1.5572,
+ "step": 47
+ },
+ {
+ "epoch": 0.04801350379794317,
+ "grad_norm": 0.7231017351150513,
+ "learning_rate": 0.00019987344272588006,
+ "loss": 1.5841,
+ "step": 48
+ },
+ {
+ "epoch": 0.049013785127066986,
+ "grad_norm": 0.7147384285926819,
+ "learning_rate": 0.00019986811574822033,
+ "loss": 1.8628,
+ "step": 49
+ },
+ {
+ "epoch": 0.0500140664561908,
+ "grad_norm": 0.8631477355957031,
+ "learning_rate": 0.00019986267903356254,
+ "loss": 1.8487,
+ "step": 50
+ },
+ {
+ "epoch": 0.05101434778531462,
+ "grad_norm": 0.7995486855506897,
+ "learning_rate": 0.0001998571325878806,
+ "loss": 1.6491,
+ "step": 51
+ },
+ {
+ "epoch": 0.05201462911443844,
+ "grad_norm": 0.7828657031059265,
+ "learning_rate": 0.0001998514764172691,
+ "loss": 1.7496,
+ "step": 52
+ },
+ {
+ "epoch": 0.053014910443562255,
+ "grad_norm": 0.7789833545684814,
+ "learning_rate": 0.00019984571052794313,
+ "loss": 1.6628,
+ "step": 53
+ },
+ {
+ "epoch": 0.054015191772686065,
+ "grad_norm": 0.7077661752700806,
+ "learning_rate": 0.00019983983492623833,
+ "loss": 1.771,
+ "step": 54
+ },
+ {
+ "epoch": 0.05501547310180988,
+ "grad_norm": 0.7939582467079163,
+ "learning_rate": 0.00019983384961861096,
+ "loss": 1.707,
+ "step": 55
+ },
+ {
+ "epoch": 0.0560157544309337,
+ "grad_norm": 0.9438828229904175,
+ "learning_rate": 0.0001998277546116378,
+ "loss": 1.8334,
+ "step": 56
+ },
+ {
+ "epoch": 0.05701603576005752,
+ "grad_norm": 0.8028286695480347,
+ "learning_rate": 0.00019982154991201608,
+ "loss": 1.9117,
+ "step": 57
+ },
+ {
+ "epoch": 0.058016317089181334,
+ "grad_norm": 0.6563037037849426,
+ "learning_rate": 0.00019981523552656377,
+ "loss": 1.4767,
+ "step": 58
+ },
+ {
+ "epoch": 0.05901659841830515,
+ "grad_norm": 0.6600964665412903,
+ "learning_rate": 0.00019980881146221914,
+ "loss": 1.6656,
+ "step": 59
+ },
+ {
+ "epoch": 0.06001687974742896,
+ "grad_norm": 0.7966578602790833,
+ "learning_rate": 0.00019980227772604112,
+ "loss": 1.4844,
+ "step": 60
+ },
+ {
+ "epoch": 0.06101716107655278,
+ "grad_norm": 0.8019976615905762,
+ "learning_rate": 0.0001997956343252091,
+ "loss": 1.5682,
+ "step": 61
+ },
+ {
+ "epoch": 0.062017442405676595,
+ "grad_norm": 0.8935349583625793,
+ "learning_rate": 0.00019978888126702296,
+ "loss": 1.8131,
+ "step": 62
+ },
+ {
+ "epoch": 0.06301772373480041,
+ "grad_norm": 0.8085179924964905,
+ "learning_rate": 0.00019978201855890308,
+ "loss": 1.5602,
+ "step": 63
+ },
+ {
+ "epoch": 0.06401800506392423,
+ "grad_norm": 0.7631951570510864,
+ "learning_rate": 0.00019977504620839035,
+ "loss": 1.8008,
+ "step": 64
+ },
+ {
+ "epoch": 0.06501828639304805,
+ "grad_norm": 0.7315165996551514,
+ "learning_rate": 0.00019976796422314615,
+ "loss": 1.5735,
+ "step": 65
+ },
+ {
+ "epoch": 0.06601856772217186,
+ "grad_norm": 0.745726466178894,
+ "learning_rate": 0.00019976077261095226,
+ "loss": 1.5775,
+ "step": 66
+ },
+ {
+ "epoch": 0.06701884905129568,
+ "grad_norm": 0.9082249999046326,
+ "learning_rate": 0.00019975347137971098,
+ "loss": 1.7427,
+ "step": 67
+ },
+ {
+ "epoch": 0.0680191303804195,
+ "grad_norm": 0.6575669050216675,
+ "learning_rate": 0.00019974606053744503,
+ "loss": 1.5231,
+ "step": 68
+ },
+ {
+ "epoch": 0.06901941170954332,
+ "grad_norm": 0.7749233245849609,
+ "learning_rate": 0.00019973854009229763,
+ "loss": 1.5703,
+ "step": 69
+ },
+ {
+ "epoch": 0.07001969303866712,
+ "grad_norm": 0.7240824699401855,
+ "learning_rate": 0.00019973091005253232,
+ "loss": 1.5197,
+ "step": 70
+ },
+ {
+ "epoch": 0.07101997436779094,
+ "grad_norm": 0.8683856725692749,
+ "learning_rate": 0.0001997231704265332,
+ "loss": 1.6183,
+ "step": 71
+ },
+ {
+ "epoch": 0.07202025569691475,
+ "grad_norm": 0.6885640621185303,
+ "learning_rate": 0.00019971532122280464,
+ "loss": 1.6565,
+ "step": 72
+ },
+ {
+ "epoch": 0.07302053702603857,
+ "grad_norm": 0.6648329496383667,
+ "learning_rate": 0.0001997073624499716,
+ "loss": 1.5943,
+ "step": 73
+ },
+ {
+ "epoch": 0.07402081835516239,
+ "grad_norm": 0.8867416977882385,
+ "learning_rate": 0.0001996992941167792,
+ "loss": 1.7855,
+ "step": 74
+ },
+ {
+ "epoch": 0.0750210996842862,
+ "grad_norm": 0.7790491580963135,
+ "learning_rate": 0.00019969111623209323,
+ "loss": 1.6723,
+ "step": 75
+ },
+ {
+ "epoch": 0.07602138101341002,
+ "grad_norm": 0.7999201416969299,
+ "learning_rate": 0.00019968282880489957,
+ "loss": 1.5619,
+ "step": 76
+ },
+ {
+ "epoch": 0.07702166234253384,
+ "grad_norm": 0.6316407322883606,
+ "learning_rate": 0.00019967443184430467,
+ "loss": 1.6377,
+ "step": 77
+ },
+ {
+ "epoch": 0.07802194367165766,
+ "grad_norm": 0.7680445313453674,
+ "learning_rate": 0.0001996659253595353,
+ "loss": 1.5433,
+ "step": 78
+ },
+ {
+ "epoch": 0.07902222500078147,
+ "grad_norm": 0.7158446907997131,
+ "learning_rate": 0.0001996573093599385,
+ "loss": 1.5436,
+ "step": 79
+ },
+ {
+ "epoch": 0.08002250632990529,
+ "grad_norm": 0.7354825139045715,
+ "learning_rate": 0.00019964858385498172,
+ "loss": 1.6512,
+ "step": 80
+ },
+ {
+ "epoch": 0.08102278765902911,
+ "grad_norm": 0.7031210660934448,
+ "learning_rate": 0.00019963974885425266,
+ "loss": 1.6411,
+ "step": 81
+ },
+ {
+ "epoch": 0.08202306898815291,
+ "grad_norm": 0.8451045751571655,
+ "learning_rate": 0.00019963080436745945,
+ "loss": 1.6622,
+ "step": 82
+ },
+ {
+ "epoch": 0.08302335031727673,
+ "grad_norm": 0.8329521417617798,
+ "learning_rate": 0.00019962175040443044,
+ "loss": 1.9269,
+ "step": 83
+ },
+ {
+ "epoch": 0.08402363164640055,
+ "grad_norm": 0.6967645883560181,
+ "learning_rate": 0.0001996125869751143,
+ "loss": 1.7243,
+ "step": 84
+ },
+ {
+ "epoch": 0.08502391297552436,
+ "grad_norm": 0.8699042797088623,
+ "learning_rate": 0.00019960331408957997,
+ "loss": 1.7211,
+ "step": 85
+ },
+ {
+ "epoch": 0.08602419430464818,
+ "grad_norm": 0.6780512928962708,
+ "learning_rate": 0.00019959393175801671,
+ "loss": 1.6376,
+ "step": 86
+ },
+ {
+ "epoch": 0.087024475633772,
+ "grad_norm": 0.7213720679283142,
+ "learning_rate": 0.00019958443999073397,
+ "loss": 1.6048,
+ "step": 87
+ },
+ {
+ "epoch": 0.08802475696289581,
+ "grad_norm": 0.6077585816383362,
+ "learning_rate": 0.00019957483879816151,
+ "loss": 1.5231,
+ "step": 88
+ },
+ {
+ "epoch": 0.08902503829201963,
+ "grad_norm": 0.6854611039161682,
+ "learning_rate": 0.00019956512819084928,
+ "loss": 1.4726,
+ "step": 89
+ },
+ {
+ "epoch": 0.09002531962114345,
+ "grad_norm": 0.6969390511512756,
+ "learning_rate": 0.00019955530817946748,
+ "loss": 1.6435,
+ "step": 90
+ },
+ {
+ "epoch": 0.09102560095026727,
+ "grad_norm": 0.7178792953491211,
+ "learning_rate": 0.00019954537877480655,
+ "loss": 1.6967,
+ "step": 91
+ },
+ {
+ "epoch": 0.09202588227939108,
+ "grad_norm": 0.8248458504676819,
+ "learning_rate": 0.00019953533998777706,
+ "loss": 1.5884,
+ "step": 92
+ },
+ {
+ "epoch": 0.0930261636085149,
+ "grad_norm": 0.6472075581550598,
+ "learning_rate": 0.00019952519182940993,
+ "loss": 1.5074,
+ "step": 93
+ },
+ {
+ "epoch": 0.09402644493763872,
+ "grad_norm": 0.7548672556877136,
+ "learning_rate": 0.00019951493431085603,
+ "loss": 1.6934,
+ "step": 94
+ },
+ {
+ "epoch": 0.09502672626676252,
+ "grad_norm": 0.6680666208267212,
+ "learning_rate": 0.00019950456744338658,
+ "loss": 1.4014,
+ "step": 95
+ },
+ {
+ "epoch": 0.09602700759588634,
+ "grad_norm": 0.7270862460136414,
+ "learning_rate": 0.00019949409123839288,
+ "loss": 1.6991,
+ "step": 96
+ },
+ {
+ "epoch": 0.09702728892501016,
+ "grad_norm": 0.682833731174469,
+ "learning_rate": 0.00019948350570738642,
+ "loss": 1.4926,
+ "step": 97
+ },
+ {
+ "epoch": 0.09802757025413397,
+ "grad_norm": 0.6598315238952637,
+ "learning_rate": 0.0001994728108619987,
+ "loss": 1.6329,
+ "step": 98
+ },
+ {
+ "epoch": 0.09902785158325779,
+ "grad_norm": 0.830845832824707,
+ "learning_rate": 0.0001994620067139815,
+ "loss": 1.8517,
+ "step": 99
+ },
+ {
+ "epoch": 0.1000281329123816,
+ "grad_norm": 0.6964694857597351,
+ "learning_rate": 0.00019945109327520658,
+ "loss": 1.5459,
+ "step": 100
+ },
+ {
+ "epoch": 0.10102841424150542,
+ "grad_norm": 0.8640177249908447,
+ "learning_rate": 0.00019944007055766586,
+ "loss": 1.6638,
+ "step": 101
+ },
+ {
+ "epoch": 0.10202869557062924,
+ "grad_norm": 0.6485210657119751,
+ "learning_rate": 0.00019942893857347128,
+ "loss": 1.8025,
+ "step": 102
+ },
+ {
+ "epoch": 0.10302897689975306,
+ "grad_norm": 0.6746248006820679,
+ "learning_rate": 0.00019941769733485494,
+ "loss": 1.6954,
+ "step": 103
+ },
+ {
+ "epoch": 0.10402925822887688,
+ "grad_norm": 0.7386549115180969,
+ "learning_rate": 0.00019940634685416888,
+ "loss": 1.4547,
+ "step": 104
+ },
+ {
+ "epoch": 0.10502953955800069,
+ "grad_norm": 0.7518633008003235,
+ "learning_rate": 0.00019939488714388524,
+ "loss": 1.5098,
+ "step": 105
+ },
+ {
+ "epoch": 0.10602982088712451,
+ "grad_norm": 0.7350422739982605,
+ "learning_rate": 0.00019938331821659614,
+ "loss": 1.5452,
+ "step": 106
+ },
+ {
+ "epoch": 0.10703010221624833,
+ "grad_norm": 0.6544668674468994,
+ "learning_rate": 0.0001993716400850138,
+ "loss": 1.5106,
+ "step": 107
+ },
+ {
+ "epoch": 0.10803038354537213,
+ "grad_norm": 0.6108564138412476,
+ "learning_rate": 0.0001993598527619703,
+ "loss": 1.5818,
+ "step": 108
+ },
+ {
+ "epoch": 0.10903066487449595,
+ "grad_norm": 0.731071949005127,
+ "learning_rate": 0.00019934795626041783,
+ "loss": 1.4819,
+ "step": 109
+ },
+ {
+ "epoch": 0.11003094620361976,
+ "grad_norm": 0.5978986620903015,
+ "learning_rate": 0.0001993359505934285,
+ "loss": 1.5469,
+ "step": 110
+ },
+ {
+ "epoch": 0.11103122753274358,
+ "grad_norm": 0.7249881029129028,
+ "learning_rate": 0.00019932383577419432,
+ "loss": 1.7466,
+ "step": 111
+ },
+ {
+ "epoch": 0.1120315088618674,
+ "grad_norm": 0.6161806583404541,
+ "learning_rate": 0.0001993116118160273,
+ "loss": 1.3411,
+ "step": 112
+ },
+ {
+ "epoch": 0.11303179019099122,
+ "grad_norm": 0.6745229363441467,
+ "learning_rate": 0.00019929927873235938,
+ "loss": 1.5615,
+ "step": 113
+ },
+ {
+ "epoch": 0.11403207152011503,
+ "grad_norm": 0.6489872336387634,
+ "learning_rate": 0.00019928683653674237,
+ "loss": 1.6279,
+ "step": 114
+ },
+ {
+ "epoch": 0.11503235284923885,
+ "grad_norm": 0.7769975662231445,
+ "learning_rate": 0.00019927428524284805,
+ "loss": 1.6155,
+ "step": 115
+ },
+ {
+ "epoch": 0.11603263417836267,
+ "grad_norm": 0.734336793422699,
+ "learning_rate": 0.00019926162486446792,
+ "loss": 1.6837,
+ "step": 116
+ },
+ {
+ "epoch": 0.11703291550748648,
+ "grad_norm": 0.6966903805732727,
+ "learning_rate": 0.0001992488554155135,
+ "loss": 1.6659,
+ "step": 117
+ },
+ {
+ "epoch": 0.1180331968366103,
+ "grad_norm": 0.6714586615562439,
+ "learning_rate": 0.00019923597691001615,
+ "loss": 1.5161,
+ "step": 118
+ },
+ {
+ "epoch": 0.11903347816573412,
+ "grad_norm": 0.6390894651412964,
+ "learning_rate": 0.0001992229893621269,
+ "loss": 1.4561,
+ "step": 119
+ },
+ {
+ "epoch": 0.12003375949485792,
+ "grad_norm": 0.6481143832206726,
+ "learning_rate": 0.00019920989278611687,
+ "loss": 1.6331,
+ "step": 120
+ },
+ {
+ "epoch": 0.12103404082398174,
+ "grad_norm": 0.6819384694099426,
+ "learning_rate": 0.0001991966871963767,
+ "loss": 1.6508,
+ "step": 121
+ },
+ {
+ "epoch": 0.12203432215310556,
+ "grad_norm": 0.6839059591293335,
+ "learning_rate": 0.000199183372607417,
+ "loss": 1.6514,
+ "step": 122
+ },
+ {
+ "epoch": 0.12303460348222937,
+ "grad_norm": 0.6401050090789795,
+ "learning_rate": 0.0001991699490338681,
+ "loss": 1.8065,
+ "step": 123
+ },
+ {
+ "epoch": 0.12403488481135319,
+ "grad_norm": 0.6860588788986206,
+ "learning_rate": 0.00019915641649048005,
+ "loss": 1.7658,
+ "step": 124
+ },
+ {
+ "epoch": 0.12503516614047702,
+ "grad_norm": 0.6286434531211853,
+ "learning_rate": 0.0001991427749921227,
+ "loss": 1.7678,
+ "step": 125
+ },
+ {
+ "epoch": 0.12603544746960083,
+ "grad_norm": 0.6609922647476196,
+ "learning_rate": 0.00019912902455378556,
+ "loss": 1.4934,
+ "step": 126
+ },
+ {
+ "epoch": 0.12703572879872463,
+ "grad_norm": 0.7058399319648743,
+ "learning_rate": 0.00019911516519057788,
+ "loss": 1.6058,
+ "step": 127
+ },
+ {
+ "epoch": 0.12803601012784846,
+ "grad_norm": 0.6362051963806152,
+ "learning_rate": 0.00019910119691772863,
+ "loss": 1.502,
+ "step": 128
+ },
+ {
+ "epoch": 0.12903629145697226,
+ "grad_norm": 0.7493100762367249,
+ "learning_rate": 0.00019908711975058637,
+ "loss": 1.5287,
+ "step": 129
+ },
+ {
+ "epoch": 0.1300365727860961,
+ "grad_norm": 0.6492393612861633,
+ "learning_rate": 0.0001990729337046194,
+ "loss": 1.5716,
+ "step": 130
+ },
+ {
+ "epoch": 0.1310368541152199,
+ "grad_norm": 0.70331871509552,
+ "learning_rate": 0.0001990586387954156,
+ "loss": 1.5882,
+ "step": 131
+ },
+ {
+ "epoch": 0.13203713544434373,
+ "grad_norm": 0.7581572532653809,
+ "learning_rate": 0.00019904423503868247,
+ "loss": 1.7627,
+ "step": 132
+ },
+ {
+ "epoch": 0.13303741677346753,
+ "grad_norm": 0.7087228894233704,
+ "learning_rate": 0.00019902972245024715,
+ "loss": 1.6257,
+ "step": 133
+ },
+ {
+ "epoch": 0.13403769810259136,
+ "grad_norm": 0.7920627593994141,
+ "learning_rate": 0.00019901510104605637,
+ "loss": 1.572,
+ "step": 134
+ },
+ {
+ "epoch": 0.13503797943171517,
+ "grad_norm": 0.6869202256202698,
+ "learning_rate": 0.00019900037084217637,
+ "loss": 1.5478,
+ "step": 135
+ },
+ {
+ "epoch": 0.136038260760839,
+ "grad_norm": 0.6879409551620483,
+ "learning_rate": 0.00019898553185479303,
+ "loss": 1.3104,
+ "step": 136
+ },
+ {
+ "epoch": 0.1370385420899628,
+ "grad_norm": 0.6574143767356873,
+ "learning_rate": 0.00019897058410021167,
+ "loss": 1.7041,
+ "step": 137
+ },
+ {
+ "epoch": 0.13803882341908663,
+ "grad_norm": 0.7793259620666504,
+ "learning_rate": 0.00019895552759485722,
+ "loss": 1.5417,
+ "step": 138
+ },
+ {
+ "epoch": 0.13903910474821043,
+ "grad_norm": 0.6310438513755798,
+ "learning_rate": 0.00019894036235527395,
+ "loss": 1.4978,
+ "step": 139
+ },
+ {
+ "epoch": 0.14003938607733424,
+ "grad_norm": 0.6298012137413025,
+ "learning_rate": 0.00019892508839812584,
+ "loss": 1.5367,
+ "step": 140
+ },
+ {
+ "epoch": 0.14103966740645807,
+ "grad_norm": 0.5647856593132019,
+ "learning_rate": 0.00019890970574019617,
+ "loss": 1.537,
+ "step": 141
+ },
+ {
+ "epoch": 0.14203994873558187,
+ "grad_norm": 0.6491876244544983,
+ "learning_rate": 0.00019889421439838763,
+ "loss": 1.6992,
+ "step": 142
+ },
+ {
+ "epoch": 0.1430402300647057,
+ "grad_norm": 0.6574720144271851,
+ "learning_rate": 0.00019887861438972246,
+ "loss": 1.3837,
+ "step": 143
+ },
+ {
+ "epoch": 0.1440405113938295,
+ "grad_norm": 0.6267092227935791,
+ "learning_rate": 0.00019886290573134228,
+ "loss": 1.6307,
+ "step": 144
+ },
+ {
+ "epoch": 0.14504079272295334,
+ "grad_norm": 0.6785029172897339,
+ "learning_rate": 0.000198847088440508,
+ "loss": 1.574,
+ "step": 145
+ },
+ {
+ "epoch": 0.14604107405207714,
+ "grad_norm": 0.6218644380569458,
+ "learning_rate": 0.0001988311625346,
+ "loss": 1.4676,
+ "step": 146
+ },
+ {
+ "epoch": 0.14704135538120097,
+ "grad_norm": 0.6047986745834351,
+ "learning_rate": 0.00019881512803111796,
+ "loss": 1.4316,
+ "step": 147
+ },
+ {
+ "epoch": 0.14804163671032478,
+ "grad_norm": 0.7340937256813049,
+ "learning_rate": 0.00019879898494768093,
+ "loss": 1.5185,
+ "step": 148
+ },
+ {
+ "epoch": 0.1490419180394486,
+ "grad_norm": 0.5874620676040649,
+ "learning_rate": 0.00019878273330202717,
+ "loss": 1.5031,
+ "step": 149
+ },
+ {
+ "epoch": 0.1500421993685724,
+ "grad_norm": 0.6943556666374207,
+ "learning_rate": 0.00019876637311201433,
+ "loss": 1.7323,
+ "step": 150
+ },
+ {
+ "epoch": 0.15104248069769624,
+ "grad_norm": 0.6345832347869873,
+ "learning_rate": 0.00019874990439561934,
+ "loss": 1.4691,
+ "step": 151
+ },
+ {
+ "epoch": 0.15204276202682004,
+ "grad_norm": 0.7047753930091858,
+ "learning_rate": 0.0001987333271709383,
+ "loss": 1.5198,
+ "step": 152
+ },
+ {
+ "epoch": 0.15304304335594385,
+ "grad_norm": 0.6043322086334229,
+ "learning_rate": 0.00019871664145618657,
+ "loss": 1.5488,
+ "step": 153
+ },
+ {
+ "epoch": 0.15404332468506768,
+ "grad_norm": 0.5978446006774902,
+ "learning_rate": 0.00019869984726969878,
+ "loss": 1.4278,
+ "step": 154
+ },
+ {
+ "epoch": 0.15504360601419148,
+ "grad_norm": 0.6796436905860901,
+ "learning_rate": 0.00019868294462992866,
+ "loss": 1.5845,
+ "step": 155
+ },
+ {
+ "epoch": 0.1560438873433153,
+ "grad_norm": 0.7113372087478638,
+ "learning_rate": 0.00019866593355544922,
+ "loss": 1.7509,
+ "step": 156
+ },
+ {
+ "epoch": 0.15704416867243912,
+ "grad_norm": 0.5908107757568359,
+ "learning_rate": 0.00019864881406495246,
+ "loss": 1.5693,
+ "step": 157
+ },
+ {
+ "epoch": 0.15804445000156295,
+ "grad_norm": 0.7135252952575684,
+ "learning_rate": 0.00019863158617724967,
+ "loss": 1.6109,
+ "step": 158
+ },
+ {
+ "epoch": 0.15904473133068675,
+ "grad_norm": 0.5621710419654846,
+ "learning_rate": 0.00019861424991127115,
+ "loss": 1.5368,
+ "step": 159
+ },
+ {
+ "epoch": 0.16004501265981058,
+ "grad_norm": 0.6205443143844604,
+ "learning_rate": 0.00019859680528606637,
+ "loss": 1.5181,
+ "step": 160
+ },
+ {
+ "epoch": 0.16104529398893438,
+ "grad_norm": 0.6933260560035706,
+ "learning_rate": 0.00019857925232080373,
+ "loss": 1.4508,
+ "step": 161
+ },
+ {
+ "epoch": 0.16204557531805822,
+ "grad_norm": 0.6911661028862,
+ "learning_rate": 0.00019856159103477086,
+ "loss": 1.5423,
+ "step": 162
+ },
+ {
+ "epoch": 0.16304585664718202,
+ "grad_norm": 0.7684744000434875,
+ "learning_rate": 0.00019854382144737426,
+ "loss": 1.4097,
+ "step": 163
+ },
+ {
+ "epoch": 0.16404613797630582,
+ "grad_norm": 0.6657288074493408,
+ "learning_rate": 0.00019852594357813952,
+ "loss": 1.6145,
+ "step": 164
+ },
+ {
+ "epoch": 0.16504641930542965,
+ "grad_norm": 0.7030160427093506,
+ "learning_rate": 0.00019850795744671116,
+ "loss": 1.6551,
+ "step": 165
+ },
+ {
+ "epoch": 0.16604670063455346,
+ "grad_norm": 0.87894207239151,
+ "learning_rate": 0.0001984898630728527,
+ "loss": 1.6316,
+ "step": 166
+ },
+ {
+ "epoch": 0.1670469819636773,
+ "grad_norm": 0.6282681226730347,
+ "learning_rate": 0.0001984716604764466,
+ "loss": 1.451,
+ "step": 167
+ },
+ {
+ "epoch": 0.1680472632928011,
+ "grad_norm": 0.6729792952537537,
+ "learning_rate": 0.0001984533496774942,
+ "loss": 1.4381,
+ "step": 168
+ },
+ {
+ "epoch": 0.16904754462192492,
+ "grad_norm": 0.7300116419792175,
+ "learning_rate": 0.0001984349306961158,
+ "loss": 1.4244,
+ "step": 169
+ },
+ {
+ "epoch": 0.17004782595104873,
+ "grad_norm": 0.6853480935096741,
+ "learning_rate": 0.00019841640355255043,
+ "loss": 1.6174,
+ "step": 170
+ },
+ {
+ "epoch": 0.17104810728017256,
+ "grad_norm": 0.735612690448761,
+ "learning_rate": 0.00019839776826715614,
+ "loss": 1.5085,
+ "step": 171
+ },
+ {
+ "epoch": 0.17204838860929636,
+ "grad_norm": 0.6735563278198242,
+ "learning_rate": 0.00019837902486040978,
+ "loss": 1.507,
+ "step": 172
+ },
+ {
+ "epoch": 0.1730486699384202,
+ "grad_norm": 0.6617917418479919,
+ "learning_rate": 0.0001983601733529069,
+ "loss": 1.6774,
+ "step": 173
+ },
+ {
+ "epoch": 0.174048951267544,
+ "grad_norm": 0.7137823700904846,
+ "learning_rate": 0.00019834121376536187,
+ "loss": 1.4665,
+ "step": 174
+ },
+ {
+ "epoch": 0.17504923259666783,
+ "grad_norm": 0.6372626423835754,
+ "learning_rate": 0.00019832214611860793,
+ "loss": 1.3597,
+ "step": 175
+ },
+ {
+ "epoch": 0.17604951392579163,
+ "grad_norm": 0.7131632566452026,
+ "learning_rate": 0.00019830297043359692,
+ "loss": 1.4833,
+ "step": 176
+ },
+ {
+ "epoch": 0.17704979525491543,
+ "grad_norm": 0.7538559436798096,
+ "learning_rate": 0.00019828368673139947,
+ "loss": 1.4714,
+ "step": 177
+ },
+ {
+ "epoch": 0.17805007658403926,
+ "grad_norm": 0.5684806108474731,
+ "learning_rate": 0.0001982642950332049,
+ "loss": 1.5012,
+ "step": 178
+ },
+ {
+ "epoch": 0.17905035791316307,
+ "grad_norm": 0.621658444404602,
+ "learning_rate": 0.00019824479536032112,
+ "loss": 1.9119,
+ "step": 179
+ },
+ {
+ "epoch": 0.1800506392422869,
+ "grad_norm": 0.6564679741859436,
+ "learning_rate": 0.0001982251877341748,
+ "loss": 1.5131,
+ "step": 180
+ },
+ {
+ "epoch": 0.1810509205714107,
+ "grad_norm": 0.6546526551246643,
+ "learning_rate": 0.00019820547217631117,
+ "loss": 1.4493,
+ "step": 181
+ },
+ {
+ "epoch": 0.18205120190053453,
+ "grad_norm": 0.6504479050636292,
+ "learning_rate": 0.00019818564870839405,
+ "loss": 1.6131,
+ "step": 182
+ },
+ {
+ "epoch": 0.18305148322965833,
+ "grad_norm": 0.6269803047180176,
+ "learning_rate": 0.00019816571735220583,
+ "loss": 1.5936,
+ "step": 183
+ },
+ {
+ "epoch": 0.18405176455878217,
+ "grad_norm": 0.6303942799568176,
+ "learning_rate": 0.00019814567812964748,
+ "loss": 1.6948,
+ "step": 184
+ },
+ {
+ "epoch": 0.18505204588790597,
+ "grad_norm": 0.6562885046005249,
+ "learning_rate": 0.00019812553106273847,
+ "loss": 1.5542,
+ "step": 185
+ },
+ {
+ "epoch": 0.1860523272170298,
+ "grad_norm": 0.5844212174415588,
+ "learning_rate": 0.00019810527617361681,
+ "loss": 1.539,
+ "step": 186
+ },
+ {
+ "epoch": 0.1870526085461536,
+ "grad_norm": 0.6402295231819153,
+ "learning_rate": 0.00019808491348453894,
+ "loss": 1.4748,
+ "step": 187
+ },
+ {
+ "epoch": 0.18805288987527743,
+ "grad_norm": 0.6579477190971375,
+ "learning_rate": 0.00019806444301787978,
+ "loss": 1.5114,
+ "step": 188
+ },
+ {
+ "epoch": 0.18905317120440124,
+ "grad_norm": 0.6511597037315369,
+ "learning_rate": 0.0001980438647961327,
+ "loss": 1.4678,
+ "step": 189
+ },
+ {
+ "epoch": 0.19005345253352504,
+ "grad_norm": 0.6911427974700928,
+ "learning_rate": 0.00019802317884190935,
+ "loss": 1.6876,
+ "step": 190
+ },
+ {
+ "epoch": 0.19105373386264887,
+ "grad_norm": 0.6146433353424072,
+ "learning_rate": 0.00019800238517793996,
+ "loss": 1.5986,
+ "step": 191
+ },
+ {
+ "epoch": 0.19205401519177268,
+ "grad_norm": 0.6126302480697632,
+ "learning_rate": 0.00019798148382707296,
+ "loss": 1.571,
+ "step": 192
+ },
+ {
+ "epoch": 0.1930542965208965,
+ "grad_norm": 0.5751072764396667,
+ "learning_rate": 0.00019796047481227515,
+ "loss": 1.4921,
+ "step": 193
+ },
+ {
+ "epoch": 0.1940545778500203,
+ "grad_norm": 0.6484839916229248,
+ "learning_rate": 0.00019793935815663163,
+ "loss": 1.7495,
+ "step": 194
+ },
+ {
+ "epoch": 0.19505485917914414,
+ "grad_norm": 0.6875973343849182,
+ "learning_rate": 0.00019791813388334581,
+ "loss": 1.5782,
+ "step": 195
+ },
+ {
+ "epoch": 0.19605514050826794,
+ "grad_norm": 0.8130943179130554,
+ "learning_rate": 0.00019789680201573933,
+ "loss": 1.4964,
+ "step": 196
+ },
+ {
+ "epoch": 0.19705542183739178,
+ "grad_norm": 0.6734403371810913,
+ "learning_rate": 0.00019787536257725202,
+ "loss": 1.4787,
+ "step": 197
+ },
+ {
+ "epoch": 0.19805570316651558,
+ "grad_norm": 0.6480582356452942,
+ "learning_rate": 0.00019785381559144196,
+ "loss": 1.5629,
+ "step": 198
+ },
+ {
+ "epoch": 0.1990559844956394,
+ "grad_norm": 0.6554624438285828,
+ "learning_rate": 0.00019783216108198542,
+ "loss": 1.5806,
+ "step": 199
+ },
+ {
+ "epoch": 0.2000562658247632,
+ "grad_norm": 0.705443263053894,
+ "learning_rate": 0.00019781039907267677,
+ "loss": 1.8372,
+ "step": 200
+ },
+ {
+ "epoch": 0.20105654715388704,
+ "grad_norm": 0.706923246383667,
+ "learning_rate": 0.00019778852958742853,
+ "loss": 1.6405,
+ "step": 201
+ },
+ {
+ "epoch": 0.20205682848301085,
+ "grad_norm": 0.7062544822692871,
+ "learning_rate": 0.00019776655265027127,
+ "loss": 1.6,
+ "step": 202
+ },
+ {
+ "epoch": 0.20305710981213465,
+ "grad_norm": 0.7227569222450256,
+ "learning_rate": 0.00019774446828535371,
+ "loss": 1.5172,
+ "step": 203
+ },
+ {
+ "epoch": 0.20405739114125848,
+ "grad_norm": 0.6762563586235046,
+ "learning_rate": 0.00019772227651694256,
+ "loss": 1.6753,
+ "step": 204
+ },
+ {
+ "epoch": 0.20505767247038229,
+ "grad_norm": 0.6048421859741211,
+ "learning_rate": 0.00019769997736942258,
+ "loss": 1.4827,
+ "step": 205
+ },
+ {
+ "epoch": 0.20605795379950612,
+ "grad_norm": 0.6002956032752991,
+ "learning_rate": 0.00019767757086729647,
+ "loss": 1.5438,
+ "step": 206
+ },
+ {
+ "epoch": 0.20705823512862992,
+ "grad_norm": 0.7948954701423645,
+ "learning_rate": 0.00019765505703518496,
+ "loss": 1.4988,
+ "step": 207
+ },
+ {
+ "epoch": 0.20805851645775375,
+ "grad_norm": 0.6495680809020996,
+ "learning_rate": 0.00019763243589782662,
+ "loss": 1.5738,
+ "step": 208
+ },
+ {
+ "epoch": 0.20905879778687755,
+ "grad_norm": 0.6413107514381409,
+ "learning_rate": 0.00019760970748007803,
+ "loss": 1.3794,
+ "step": 209
+ },
+ {
+ "epoch": 0.21005907911600138,
+ "grad_norm": 0.5999665260314941,
+ "learning_rate": 0.0001975868718069136,
+ "loss": 1.4313,
+ "step": 210
+ },
+ {
+ "epoch": 0.2110593604451252,
+ "grad_norm": 0.6355773210525513,
+ "learning_rate": 0.00019756392890342563,
+ "loss": 1.5107,
+ "step": 211
+ },
+ {
+ "epoch": 0.21205964177424902,
+ "grad_norm": 0.6068251729011536,
+ "learning_rate": 0.00019754087879482422,
+ "loss": 1.536,
+ "step": 212
+ },
+ {
+ "epoch": 0.21305992310337282,
+ "grad_norm": 0.5568909049034119,
+ "learning_rate": 0.00019751772150643722,
+ "loss": 1.5372,
+ "step": 213
+ },
+ {
+ "epoch": 0.21406020443249665,
+ "grad_norm": 0.5771281719207764,
+ "learning_rate": 0.00019749445706371038,
+ "loss": 1.487,
+ "step": 214
+ },
+ {
+ "epoch": 0.21506048576162046,
+ "grad_norm": 0.6146671772003174,
+ "learning_rate": 0.00019747108549220702,
+ "loss": 1.4585,
+ "step": 215
+ },
+ {
+ "epoch": 0.21606076709074426,
+ "grad_norm": 0.5595754981040955,
+ "learning_rate": 0.00019744760681760832,
+ "loss": 1.4224,
+ "step": 216
+ },
+ {
+ "epoch": 0.2170610484198681,
+ "grad_norm": 0.5873929858207703,
+ "learning_rate": 0.00019742402106571314,
+ "loss": 1.4581,
+ "step": 217
+ },
+ {
+ "epoch": 0.2180613297489919,
+ "grad_norm": 0.5725668668746948,
+ "learning_rate": 0.00019740032826243788,
+ "loss": 1.4393,
+ "step": 218
+ },
+ {
+ "epoch": 0.21906161107811573,
+ "grad_norm": 0.6452648043632507,
+ "learning_rate": 0.0001973765284338167,
+ "loss": 1.6048,
+ "step": 219
+ },
+ {
+ "epoch": 0.22006189240723953,
+ "grad_norm": 0.6166092753410339,
+ "learning_rate": 0.00019735262160600127,
+ "loss": 1.4976,
+ "step": 220
+ },
+ {
+ "epoch": 0.22106217373636336,
+ "grad_norm": 0.7053269147872925,
+ "learning_rate": 0.00019732860780526088,
+ "loss": 1.6882,
+ "step": 221
+ },
+ {
+ "epoch": 0.22206245506548716,
+ "grad_norm": 0.7072796821594238,
+ "learning_rate": 0.00019730448705798239,
+ "loss": 1.5441,
+ "step": 222
+ },
+ {
+ "epoch": 0.223062736394611,
+ "grad_norm": 0.6704496145248413,
+ "learning_rate": 0.00019728025939067008,
+ "loss": 1.3791,
+ "step": 223
+ },
+ {
+ "epoch": 0.2240630177237348,
+ "grad_norm": 0.6141743659973145,
+ "learning_rate": 0.00019725592482994583,
+ "loss": 1.5831,
+ "step": 224
+ },
+ {
+ "epoch": 0.22506329905285863,
+ "grad_norm": 0.6235673427581787,
+ "learning_rate": 0.00019723148340254892,
+ "loss": 1.6103,
+ "step": 225
+ },
+ {
+ "epoch": 0.22606358038198243,
+ "grad_norm": 0.6383673548698425,
+ "learning_rate": 0.00019720693513533598,
+ "loss": 1.6284,
+ "step": 226
+ },
+ {
+ "epoch": 0.22706386171110624,
+ "grad_norm": 0.7666104435920715,
+ "learning_rate": 0.00019718228005528122,
+ "loss": 1.702,
+ "step": 227
+ },
+ {
+ "epoch": 0.22806414304023007,
+ "grad_norm": 0.6431383490562439,
+ "learning_rate": 0.00019715751818947603,
+ "loss": 1.4571,
+ "step": 228
+ },
+ {
+ "epoch": 0.22906442436935387,
+ "grad_norm": 0.6177626252174377,
+ "learning_rate": 0.0001971326495651293,
+ "loss": 1.4326,
+ "step": 229
+ },
+ {
+ "epoch": 0.2300647056984777,
+ "grad_norm": 0.7352898120880127,
+ "learning_rate": 0.00019710767420956705,
+ "loss": 1.7427,
+ "step": 230
+ },
+ {
+ "epoch": 0.2310649870276015,
+ "grad_norm": 0.6259469389915466,
+ "learning_rate": 0.0001970825921502328,
+ "loss": 1.634,
+ "step": 231
+ },
+ {
+ "epoch": 0.23206526835672533,
+ "grad_norm": 0.6699635982513428,
+ "learning_rate": 0.0001970574034146871,
+ "loss": 1.4705,
+ "step": 232
+ },
+ {
+ "epoch": 0.23306554968584914,
+ "grad_norm": 0.5577033162117004,
+ "learning_rate": 0.00019703210803060782,
+ "loss": 1.5438,
+ "step": 233
+ },
+ {
+ "epoch": 0.23406583101497297,
+ "grad_norm": 0.6063429117202759,
+ "learning_rate": 0.00019700670602579008,
+ "loss": 1.555,
+ "step": 234
+ },
+ {
+ "epoch": 0.23506611234409677,
+ "grad_norm": 0.6069104671478271,
+ "learning_rate": 0.00019698119742814606,
+ "loss": 1.5036,
+ "step": 235
+ },
+ {
+ "epoch": 0.2360663936732206,
+ "grad_norm": 0.6158379316329956,
+ "learning_rate": 0.00019695558226570507,
+ "loss": 1.3741,
+ "step": 236
+ },
+ {
+ "epoch": 0.2370666750023444,
+ "grad_norm": 0.6366294622421265,
+ "learning_rate": 0.00019692986056661356,
+ "loss": 1.4467,
+ "step": 237
+ },
+ {
+ "epoch": 0.23806695633146824,
+ "grad_norm": 0.6726595163345337,
+ "learning_rate": 0.00019690403235913504,
+ "loss": 1.3861,
+ "step": 238
+ },
+ {
+ "epoch": 0.23906723766059204,
+ "grad_norm": 0.6546512842178345,
+ "learning_rate": 0.00019687809767165,
+ "loss": 1.6886,
+ "step": 239
+ },
+ {
+ "epoch": 0.24006751898971584,
+ "grad_norm": 0.6623121500015259,
+ "learning_rate": 0.000196852056532656,
+ "loss": 1.5925,
+ "step": 240
+ },
+ {
+ "epoch": 0.24106780031883968,
+ "grad_norm": 0.6577529311180115,
+ "learning_rate": 0.00019682590897076752,
+ "loss": 1.4509,
+ "step": 241
+ },
+ {
+ "epoch": 0.24206808164796348,
+ "grad_norm": 0.5586327314376831,
+ "learning_rate": 0.00019679965501471608,
+ "loss": 1.6346,
+ "step": 242
+ },
+ {
+ "epoch": 0.2430683629770873,
+ "grad_norm": 0.6459937691688538,
+ "learning_rate": 0.0001967732946933499,
+ "loss": 1.4129,
+ "step": 243
+ },
+ {
+ "epoch": 0.2440686443062111,
+ "grad_norm": 0.778732180595398,
+ "learning_rate": 0.00019674682803563428,
+ "loss": 1.5129,
+ "step": 244
+ },
+ {
+ "epoch": 0.24506892563533494,
+ "grad_norm": 0.7264451384544373,
+ "learning_rate": 0.00019672025507065131,
+ "loss": 1.4483,
+ "step": 245
+ },
+ {
+ "epoch": 0.24606920696445875,
+ "grad_norm": 0.616084635257721,
+ "learning_rate": 0.00019669357582759983,
+ "loss": 1.5947,
+ "step": 246
+ },
+ {
+ "epoch": 0.24706948829358258,
+ "grad_norm": 0.5911642909049988,
+ "learning_rate": 0.00019666679033579552,
+ "loss": 1.6407,
+ "step": 247
+ },
+ {
+ "epoch": 0.24806976962270638,
+ "grad_norm": 0.6102796792984009,
+ "learning_rate": 0.00019663989862467082,
+ "loss": 1.5251,
+ "step": 248
+ },
+ {
+ "epoch": 0.2490700509518302,
+ "grad_norm": 0.5973434448242188,
+ "learning_rate": 0.00019661290072377482,
+ "loss": 1.3969,
+ "step": 249
+ },
+ {
+ "epoch": 0.25007033228095404,
+ "grad_norm": 0.8515523076057434,
+ "learning_rate": 0.00019658579666277334,
+ "loss": 1.5687,
+ "step": 250
+ },
+ {
+ "epoch": 0.2510706136100778,
+ "grad_norm": 0.5003417134284973,
+ "learning_rate": 0.0001965585864714488,
+ "loss": 1.4102,
+ "step": 251
+ },
+ {
+ "epoch": 0.25207089493920165,
+ "grad_norm": 0.5215190052986145,
+ "learning_rate": 0.00019653127017970034,
+ "loss": 1.2471,
+ "step": 252
+ },
+ {
+ "epoch": 0.2530711762683255,
+ "grad_norm": 0.6491619348526001,
+ "learning_rate": 0.0001965038478175436,
+ "loss": 1.6969,
+ "step": 253
+ },
+ {
+ "epoch": 0.25407145759744926,
+ "grad_norm": 0.6176133155822754,
+ "learning_rate": 0.00019647631941511082,
+ "loss": 1.5351,
+ "step": 254
+ },
+ {
+ "epoch": 0.2550717389265731,
+ "grad_norm": 0.6913408041000366,
+ "learning_rate": 0.0001964486850026507,
+ "loss": 1.4309,
+ "step": 255
+ },
+ {
+ "epoch": 0.2560720202556969,
+ "grad_norm": 0.5875718593597412,
+ "learning_rate": 0.00019642094461052852,
+ "loss": 1.4679,
+ "step": 256
+ },
+ {
+ "epoch": 0.25707230158482075,
+ "grad_norm": 0.6682264804840088,
+ "learning_rate": 0.00019639309826922585,
+ "loss": 1.5393,
+ "step": 257
+ },
+ {
+ "epoch": 0.2580725829139445,
+ "grad_norm": 0.7241432666778564,
+ "learning_rate": 0.0001963651460093409,
+ "loss": 1.4998,
+ "step": 258
+ },
+ {
+ "epoch": 0.25907286424306836,
+ "grad_norm": 0.5210353136062622,
+ "learning_rate": 0.00019633708786158806,
+ "loss": 1.3837,
+ "step": 259
+ },
+ {
+ "epoch": 0.2600731455721922,
+ "grad_norm": 0.584020733833313,
+ "learning_rate": 0.00019630892385679818,
+ "loss": 1.4961,
+ "step": 260
+ },
+ {
+ "epoch": 0.261073426901316,
+ "grad_norm": 0.6708115935325623,
+ "learning_rate": 0.00019628065402591845,
+ "loss": 1.5277,
+ "step": 261
+ },
+ {
+ "epoch": 0.2620737082304398,
+ "grad_norm": 0.5480003952980042,
+ "learning_rate": 0.00019625227840001225,
+ "loss": 1.556,
+ "step": 262
+ },
+ {
+ "epoch": 0.2630739895595636,
+ "grad_norm": 0.595191478729248,
+ "learning_rate": 0.0001962237970102593,
+ "loss": 1.3514,
+ "step": 263
+ },
+ {
+ "epoch": 0.26407427088868746,
+ "grad_norm": 0.7332099080085754,
+ "learning_rate": 0.0001961952098879555,
+ "loss": 1.5394,
+ "step": 264
+ },
+ {
+ "epoch": 0.26507455221781123,
+ "grad_norm": 0.596319317817688,
+ "learning_rate": 0.00019616651706451287,
+ "loss": 1.3828,
+ "step": 265
+ },
+ {
+ "epoch": 0.26607483354693506,
+ "grad_norm": 0.5998026132583618,
+ "learning_rate": 0.0001961377185714597,
+ "loss": 1.4479,
+ "step": 266
+ },
+ {
+ "epoch": 0.2670751148760589,
+ "grad_norm": 0.6220220923423767,
+ "learning_rate": 0.0001961088144404403,
+ "loss": 1.5121,
+ "step": 267
+ },
+ {
+ "epoch": 0.2680753962051827,
+ "grad_norm": 0.5865943431854248,
+ "learning_rate": 0.00019607980470321505,
+ "loss": 1.6747,
+ "step": 268
+ },
+ {
+ "epoch": 0.2690756775343065,
+ "grad_norm": 0.5790852904319763,
+ "learning_rate": 0.00019605068939166045,
+ "loss": 1.3798,
+ "step": 269
+ },
+ {
+ "epoch": 0.27007595886343033,
+ "grad_norm": 0.6157498955726624,
+ "learning_rate": 0.00019602146853776894,
+ "loss": 1.6799,
+ "step": 270
+ },
+ {
+ "epoch": 0.27107624019255416,
+ "grad_norm": 0.6214422583580017,
+ "learning_rate": 0.000195992142173649,
+ "loss": 1.4782,
+ "step": 271
+ },
+ {
+ "epoch": 0.272076521521678,
+ "grad_norm": 0.6460129618644714,
+ "learning_rate": 0.0001959627103315249,
+ "loss": 1.4874,
+ "step": 272
+ },
+ {
+ "epoch": 0.27307680285080177,
+ "grad_norm": 0.5928930640220642,
+ "learning_rate": 0.00019593317304373705,
+ "loss": 1.4557,
+ "step": 273
+ },
+ {
+ "epoch": 0.2740770841799256,
+ "grad_norm": 0.5123687982559204,
+ "learning_rate": 0.00019590353034274144,
+ "loss": 1.445,
+ "step": 274
+ },
+ {
+ "epoch": 0.27507736550904943,
+ "grad_norm": 0.607455313205719,
+ "learning_rate": 0.00019587378226111014,
+ "loss": 1.4468,
+ "step": 275
+ },
+ {
+ "epoch": 0.27607764683817326,
+ "grad_norm": 0.6108120083808899,
+ "learning_rate": 0.00019584392883153088,
+ "loss": 1.3834,
+ "step": 276
+ },
+ {
+ "epoch": 0.27707792816729704,
+ "grad_norm": 0.680404543876648,
+ "learning_rate": 0.00019581397008680717,
+ "loss": 1.5094,
+ "step": 277
+ },
+ {
+ "epoch": 0.27807820949642087,
+ "grad_norm": 0.6419563889503479,
+ "learning_rate": 0.00019578390605985826,
+ "loss": 1.6933,
+ "step": 278
+ },
+ {
+ "epoch": 0.2790784908255447,
+ "grad_norm": 0.5788853764533997,
+ "learning_rate": 0.00019575373678371909,
+ "loss": 1.4754,
+ "step": 279
+ },
+ {
+ "epoch": 0.2800787721546685,
+ "grad_norm": 0.5943770408630371,
+ "learning_rate": 0.00019572346229154025,
+ "loss": 1.2949,
+ "step": 280
+ },
+ {
+ "epoch": 0.2810790534837923,
+ "grad_norm": 0.5997135043144226,
+ "learning_rate": 0.00019569308261658787,
+ "loss": 1.5365,
+ "step": 281
+ },
+ {
+ "epoch": 0.28207933481291614,
+ "grad_norm": 0.692401647567749,
+ "learning_rate": 0.00019566259779224378,
+ "loss": 1.4946,
+ "step": 282
+ },
+ {
+ "epoch": 0.28307961614203997,
+ "grad_norm": 0.5856708884239197,
+ "learning_rate": 0.00019563200785200526,
+ "loss": 1.426,
+ "step": 283
+ },
+ {
+ "epoch": 0.28407989747116374,
+ "grad_norm": 1.2516822814941406,
+ "learning_rate": 0.00019560131282948516,
+ "loss": 1.5119,
+ "step": 284
+ },
+ {
+ "epoch": 0.2850801788002876,
+ "grad_norm": 0.6360501050949097,
+ "learning_rate": 0.0001955705127584117,
+ "loss": 1.3916,
+ "step": 285
+ },
+ {
+ "epoch": 0.2860804601294114,
+ "grad_norm": 0.6822036504745483,
+ "learning_rate": 0.00019553960767262863,
+ "loss": 1.5565,
+ "step": 286
+ },
+ {
+ "epoch": 0.28708074145853524,
+ "grad_norm": 0.6973714828491211,
+ "learning_rate": 0.00019550859760609503,
+ "loss": 1.5559,
+ "step": 287
+ },
+ {
+ "epoch": 0.288081022787659,
+ "grad_norm": 0.6595618724822998,
+ "learning_rate": 0.00019547748259288536,
+ "loss": 1.5824,
+ "step": 288
+ },
+ {
+ "epoch": 0.28908130411678284,
+ "grad_norm": 0.5625808238983154,
+ "learning_rate": 0.0001954462626671894,
+ "loss": 1.2669,
+ "step": 289
+ },
+ {
+ "epoch": 0.2900815854459067,
+ "grad_norm": 0.6318663358688354,
+ "learning_rate": 0.0001954149378633122,
+ "loss": 1.3896,
+ "step": 290
+ },
+ {
+ "epoch": 0.29108186677503045,
+ "grad_norm": 0.6655906438827515,
+ "learning_rate": 0.00019538350821567404,
+ "loss": 1.3889,
+ "step": 291
+ },
+ {
+ "epoch": 0.2920821481041543,
+ "grad_norm": 0.5947337746620178,
+ "learning_rate": 0.00019535197375881045,
+ "loss": 1.6112,
+ "step": 292
+ },
+ {
+ "epoch": 0.2930824294332781,
+ "grad_norm": 0.6139295101165771,
+ "learning_rate": 0.00019532033452737205,
+ "loss": 1.5185,
+ "step": 293
+ },
+ {
+ "epoch": 0.29408271076240194,
+ "grad_norm": 0.579953670501709,
+ "learning_rate": 0.00019528859055612468,
+ "loss": 1.3874,
+ "step": 294
+ },
+ {
+ "epoch": 0.2950829920915257,
+ "grad_norm": 0.6101506352424622,
+ "learning_rate": 0.0001952567418799492,
+ "loss": 1.5965,
+ "step": 295
+ },
+ {
+ "epoch": 0.29608327342064955,
+ "grad_norm": 0.6393965482711792,
+ "learning_rate": 0.00019522478853384155,
+ "loss": 1.4124,
+ "step": 296
+ },
+ {
+ "epoch": 0.2970835547497734,
+ "grad_norm": 0.6147856712341309,
+ "learning_rate": 0.00019519273055291266,
+ "loss": 1.3776,
+ "step": 297
+ },
+ {
+ "epoch": 0.2980838360788972,
+ "grad_norm": 0.6056416630744934,
+ "learning_rate": 0.00019516056797238846,
+ "loss": 1.4453,
+ "step": 298
+ },
+ {
+ "epoch": 0.299084117408021,
+ "grad_norm": 0.6705831289291382,
+ "learning_rate": 0.00019512830082760987,
+ "loss": 1.3248,
+ "step": 299
+ },
+ {
+ "epoch": 0.3000843987371448,
+ "grad_norm": 0.6664314866065979,
+ "learning_rate": 0.00019509592915403255,
+ "loss": 1.5865,
+ "step": 300
+ },
+ {
+ "epoch": 0.30108468006626865,
+ "grad_norm": 0.5325604677200317,
+ "learning_rate": 0.00019506345298722717,
+ "loss": 1.0646,
+ "step": 301
+ },
+ {
+ "epoch": 0.3020849613953925,
+ "grad_norm": 0.589242160320282,
+ "learning_rate": 0.00019503087236287913,
+ "loss": 1.2297,
+ "step": 302
+ },
+ {
+ "epoch": 0.30308524272451626,
+ "grad_norm": 0.5677699446678162,
+ "learning_rate": 0.00019499818731678873,
+ "loss": 1.3961,
+ "step": 303
+ },
+ {
+ "epoch": 0.3040855240536401,
+ "grad_norm": 0.5676394701004028,
+ "learning_rate": 0.00019496539788487082,
+ "loss": 1.3276,
+ "step": 304
+ },
+ {
+ "epoch": 0.3050858053827639,
+ "grad_norm": 0.7280861139297485,
+ "learning_rate": 0.0001949325041031551,
+ "loss": 1.6731,
+ "step": 305
+ },
+ {
+ "epoch": 0.3060860867118877,
+ "grad_norm": 0.690636396408081,
+ "learning_rate": 0.0001948995060077859,
+ "loss": 1.5443,
+ "step": 306
+ },
+ {
+ "epoch": 0.3070863680410115,
+ "grad_norm": 0.611426055431366,
+ "learning_rate": 0.0001948664036350221,
+ "loss": 1.5827,
+ "step": 307
+ },
+ {
+ "epoch": 0.30808664937013536,
+ "grad_norm": 0.7112497091293335,
+ "learning_rate": 0.00019483319702123732,
+ "loss": 1.5401,
+ "step": 308
+ },
+ {
+ "epoch": 0.3090869306992592,
+ "grad_norm": 0.6598275303840637,
+ "learning_rate": 0.00019479988620291956,
+ "loss": 1.6432,
+ "step": 309
+ },
+ {
+ "epoch": 0.31008721202838296,
+ "grad_norm": 0.5019932985305786,
+ "learning_rate": 0.00019476647121667137,
+ "loss": 1.2561,
+ "step": 310
+ },
+ {
+ "epoch": 0.3110874933575068,
+ "grad_norm": 0.7777897715568542,
+ "learning_rate": 0.00019473295209920983,
+ "loss": 1.6118,
+ "step": 311
+ },
+ {
+ "epoch": 0.3120877746866306,
+ "grad_norm": 0.6028640866279602,
+ "learning_rate": 0.00019469932888736632,
+ "loss": 1.4682,
+ "step": 312
+ },
+ {
+ "epoch": 0.31308805601575446,
+ "grad_norm": 0.554381251335144,
+ "learning_rate": 0.00019466560161808674,
+ "loss": 1.4179,
+ "step": 313
+ },
+ {
+ "epoch": 0.31408833734487823,
+ "grad_norm": 0.6212736368179321,
+ "learning_rate": 0.00019463177032843124,
+ "loss": 1.4327,
+ "step": 314
+ },
+ {
+ "epoch": 0.31508861867400206,
+ "grad_norm": 0.6829814910888672,
+ "learning_rate": 0.00019459783505557424,
+ "loss": 1.4455,
+ "step": 315
+ },
+ {
+ "epoch": 0.3160889000031259,
+ "grad_norm": 0.5808065533638,
+ "learning_rate": 0.00019456379583680452,
+ "loss": 1.3583,
+ "step": 316
+ },
+ {
+ "epoch": 0.31708918133224967,
+ "grad_norm": 0.6354159712791443,
+ "learning_rate": 0.000194529652709525,
+ "loss": 1.6916,
+ "step": 317
+ },
+ {
+ "epoch": 0.3180894626613735,
+ "grad_norm": 0.6299159526824951,
+ "learning_rate": 0.00019449540571125286,
+ "loss": 1.47,
+ "step": 318
+ },
+ {
+ "epoch": 0.31908974399049733,
+ "grad_norm": 0.6222877502441406,
+ "learning_rate": 0.00019446105487961926,
+ "loss": 1.4137,
+ "step": 319
+ },
+ {
+ "epoch": 0.32009002531962116,
+ "grad_norm": 0.5995916724205017,
+ "learning_rate": 0.0001944266002523696,
+ "loss": 1.3679,
+ "step": 320
+ },
+ {
+ "epoch": 0.32109030664874494,
+ "grad_norm": 0.599814236164093,
+ "learning_rate": 0.0001943920418673633,
+ "loss": 1.4075,
+ "step": 321
+ },
+ {
+ "epoch": 0.32209058797786877,
+ "grad_norm": 0.5409269332885742,
+ "learning_rate": 0.00019435737976257377,
+ "loss": 1.4289,
+ "step": 322
+ },
+ {
+ "epoch": 0.3230908693069926,
+ "grad_norm": 0.5298951864242554,
+ "learning_rate": 0.00019432261397608834,
+ "loss": 1.2834,
+ "step": 323
+ },
+ {
+ "epoch": 0.32409115063611643,
+ "grad_norm": 0.7196112871170044,
+ "learning_rate": 0.00019428774454610843,
+ "loss": 1.4845,
+ "step": 324
+ },
+ {
+ "epoch": 0.3250914319652402,
+ "grad_norm": 0.5605450868606567,
+ "learning_rate": 0.00019425277151094913,
+ "loss": 1.4575,
+ "step": 325
+ },
+ {
+ "epoch": 0.32609171329436404,
+ "grad_norm": 0.573080837726593,
+ "learning_rate": 0.00019421769490903957,
+ "loss": 1.5757,
+ "step": 326
+ },
+ {
+ "epoch": 0.32709199462348787,
+ "grad_norm": 0.5017902851104736,
+ "learning_rate": 0.0001941825147789225,
+ "loss": 1.5794,
+ "step": 327
+ },
+ {
+ "epoch": 0.32809227595261165,
+ "grad_norm": 0.643267810344696,
+ "learning_rate": 0.00019414723115925456,
+ "loss": 1.4903,
+ "step": 328
+ },
+ {
+ "epoch": 0.3290925572817355,
+ "grad_norm": 0.6522070169448853,
+ "learning_rate": 0.0001941118440888061,
+ "loss": 1.5907,
+ "step": 329
+ },
+ {
+ "epoch": 0.3300928386108593,
+ "grad_norm": 0.6496105790138245,
+ "learning_rate": 0.0001940763536064611,
+ "loss": 1.4225,
+ "step": 330
+ },
+ {
+ "epoch": 0.33109311993998314,
+ "grad_norm": 0.6011468768119812,
+ "learning_rate": 0.00019404075975121716,
+ "loss": 1.5022,
+ "step": 331
+ },
+ {
+ "epoch": 0.3320934012691069,
+ "grad_norm": 0.6327878832817078,
+ "learning_rate": 0.0001940050625621855,
+ "loss": 1.468,
+ "step": 332
+ },
+ {
+ "epoch": 0.33309368259823074,
+ "grad_norm": 0.6187490820884705,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 1.5183,
+ "step": 333
+ },
+ {
+ "epoch": 0.3340939639273546,
+ "grad_norm": 0.7625093460083008,
+ "learning_rate": 0.0001939333583397715,
+ "loss": 1.4813,
+ "step": 334
+ },
+ {
+ "epoch": 0.3350942452564784,
+ "grad_norm": 0.5286359190940857,
+ "learning_rate": 0.00019389735138517915,
+ "loss": 1.3674,
+ "step": 335
+ },
+ {
+ "epoch": 0.3360945265856022,
+ "grad_norm": 0.5798503160476685,
+ "learning_rate": 0.00019386124125437895,
+ "loss": 1.3016,
+ "step": 336
+ },
+ {
+ "epoch": 0.337094807914726,
+ "grad_norm": 0.48794126510620117,
+ "learning_rate": 0.00019382502798704935,
+ "loss": 1.3642,
+ "step": 337
+ },
+ {
+ "epoch": 0.33809508924384984,
+ "grad_norm": 0.7394312620162964,
+ "learning_rate": 0.00019378871162298227,
+ "loss": 1.327,
+ "step": 338
+ },
+ {
+ "epoch": 0.3390953705729737,
+ "grad_norm": 0.5598319172859192,
+ "learning_rate": 0.00019375229220208276,
+ "loss": 1.4247,
+ "step": 339
+ },
+ {
+ "epoch": 0.34009565190209745,
+ "grad_norm": 0.6099628806114197,
+ "learning_rate": 0.00019371576976436917,
+ "loss": 1.4906,
+ "step": 340
+ },
+ {
+ "epoch": 0.3410959332312213,
+ "grad_norm": 0.6749781370162964,
+ "learning_rate": 0.00019367914434997312,
+ "loss": 1.367,
+ "step": 341
+ },
+ {
+ "epoch": 0.3420962145603451,
+ "grad_norm": 0.7721238136291504,
+ "learning_rate": 0.00019364241599913924,
+ "loss": 1.4464,
+ "step": 342
+ },
+ {
+ "epoch": 0.3430964958894689,
+ "grad_norm": 0.5762369632720947,
+ "learning_rate": 0.0001936055847522254,
+ "loss": 1.409,
+ "step": 343
+ },
+ {
+ "epoch": 0.3440967772185927,
+ "grad_norm": 0.6960498690605164,
+ "learning_rate": 0.00019356865064970244,
+ "loss": 1.3907,
+ "step": 344
+ },
+ {
+ "epoch": 0.34509705854771655,
+ "grad_norm": 0.5805984735488892,
+ "learning_rate": 0.0001935316137321543,
+ "loss": 1.4539,
+ "step": 345
+ },
+ {
+ "epoch": 0.3460973398768404,
+ "grad_norm": 0.5686045289039612,
+ "learning_rate": 0.00019349447404027782,
+ "loss": 1.4493,
+ "step": 346
+ },
+ {
+ "epoch": 0.34709762120596416,
+ "grad_norm": 0.5448501706123352,
+ "learning_rate": 0.00019345723161488283,
+ "loss": 1.5633,
+ "step": 347
+ },
+ {
+ "epoch": 0.348097902535088,
+ "grad_norm": 0.6388784050941467,
+ "learning_rate": 0.000193419886496892,
+ "loss": 1.7179,
+ "step": 348
+ },
+ {
+ "epoch": 0.3490981838642118,
+ "grad_norm": 0.5240457653999329,
+ "learning_rate": 0.00019338243872734086,
+ "loss": 1.4411,
+ "step": 349
+ },
+ {
+ "epoch": 0.35009846519333565,
+ "grad_norm": 0.5460641384124756,
+ "learning_rate": 0.00019334488834737775,
+ "loss": 1.361,
+ "step": 350
+ },
+ {
+ "epoch": 0.3510987465224594,
+ "grad_norm": 0.5495695471763611,
+ "learning_rate": 0.00019330723539826375,
+ "loss": 1.5891,
+ "step": 351
+ },
+ {
+ "epoch": 0.35209902785158326,
+ "grad_norm": 0.5618153214454651,
+ "learning_rate": 0.00019326947992137262,
+ "loss": 1.3084,
+ "step": 352
+ },
+ {
+ "epoch": 0.3530993091807071,
+ "grad_norm": 0.5603707432746887,
+ "learning_rate": 0.00019323162195819082,
+ "loss": 1.5732,
+ "step": 353
+ },
+ {
+ "epoch": 0.35409959050983086,
+ "grad_norm": 0.5732563138008118,
+ "learning_rate": 0.0001931936615503174,
+ "loss": 1.5045,
+ "step": 354
+ },
+ {
+ "epoch": 0.3550998718389547,
+ "grad_norm": 0.5997583866119385,
+ "learning_rate": 0.000193155598739464,
+ "loss": 1.4175,
+ "step": 355
+ },
+ {
+ "epoch": 0.3561001531680785,
+ "grad_norm": 0.5769765377044678,
+ "learning_rate": 0.0001931174335674547,
+ "loss": 1.4834,
+ "step": 356
+ },
+ {
+ "epoch": 0.35710043449720236,
+ "grad_norm": 0.5902683138847351,
+ "learning_rate": 0.0001930791660762262,
+ "loss": 1.4664,
+ "step": 357
+ },
+ {
+ "epoch": 0.35810071582632613,
+ "grad_norm": 0.6354758143424988,
+ "learning_rate": 0.00019304079630782752,
+ "loss": 1.3891,
+ "step": 358
+ },
+ {
+ "epoch": 0.35910099715544996,
+ "grad_norm": 0.6018317341804504,
+ "learning_rate": 0.0001930023243044201,
+ "loss": 1.4514,
+ "step": 359
+ },
+ {
+ "epoch": 0.3601012784845738,
+ "grad_norm": 0.5409123301506042,
+ "learning_rate": 0.00019296375010827773,
+ "loss": 1.4708,
+ "step": 360
+ },
+ {
+ "epoch": 0.3611015598136976,
+ "grad_norm": 0.5457523465156555,
+ "learning_rate": 0.00019292507376178643,
+ "loss": 1.4988,
+ "step": 361
+ },
+ {
+ "epoch": 0.3621018411428214,
+ "grad_norm": 0.626768946647644,
+ "learning_rate": 0.00019288629530744454,
+ "loss": 1.5722,
+ "step": 362
+ },
+ {
+ "epoch": 0.36310212247194523,
+ "grad_norm": 0.566554069519043,
+ "learning_rate": 0.0001928474147878626,
+ "loss": 1.2135,
+ "step": 363
+ },
+ {
+ "epoch": 0.36410240380106906,
+ "grad_norm": 0.7327786684036255,
+ "learning_rate": 0.0001928084322457632,
+ "loss": 1.5245,
+ "step": 364
+ },
+ {
+ "epoch": 0.3651026851301929,
+ "grad_norm": 0.5205698609352112,
+ "learning_rate": 0.00019276934772398114,
+ "loss": 1.2068,
+ "step": 365
+ },
+ {
+ "epoch": 0.36610296645931667,
+ "grad_norm": 1.0956753492355347,
+ "learning_rate": 0.00019273016126546323,
+ "loss": 1.5044,
+ "step": 366
+ },
+ {
+ "epoch": 0.3671032477884405,
+ "grad_norm": 0.6484043598175049,
+ "learning_rate": 0.00019269087291326833,
+ "loss": 1.6369,
+ "step": 367
+ },
+ {
+ "epoch": 0.36810352911756433,
+ "grad_norm": 0.6363429427146912,
+ "learning_rate": 0.00019265148271056722,
+ "loss": 1.4338,
+ "step": 368
+ },
+ {
+ "epoch": 0.3691038104466881,
+ "grad_norm": 0.6295244693756104,
+ "learning_rate": 0.0001926119907006426,
+ "loss": 1.4701,
+ "step": 369
+ },
+ {
+ "epoch": 0.37010409177581194,
+ "grad_norm": 0.6013259887695312,
+ "learning_rate": 0.00019257239692688907,
+ "loss": 1.7629,
+ "step": 370
+ },
+ {
+ "epoch": 0.37110437310493577,
+ "grad_norm": 0.6949493885040283,
+ "learning_rate": 0.00019253270143281296,
+ "loss": 1.6713,
+ "step": 371
+ },
+ {
+ "epoch": 0.3721046544340596,
+ "grad_norm": 0.6933801174163818,
+ "learning_rate": 0.00019249290426203252,
+ "loss": 1.6131,
+ "step": 372
+ },
+ {
+ "epoch": 0.3731049357631834,
+ "grad_norm": 0.5847527384757996,
+ "learning_rate": 0.0001924530054582776,
+ "loss": 1.3968,
+ "step": 373
+ },
+ {
+ "epoch": 0.3741052170923072,
+ "grad_norm": 0.6053057312965393,
+ "learning_rate": 0.0001924130050653898,
+ "loss": 1.3311,
+ "step": 374
+ },
+ {
+ "epoch": 0.37510549842143104,
+ "grad_norm": 0.5513793230056763,
+ "learning_rate": 0.00019237290312732226,
+ "loss": 1.5063,
+ "step": 375
+ },
+ {
+ "epoch": 0.37610577975055487,
+ "grad_norm": 0.5859197378158569,
+ "learning_rate": 0.00019233269968813984,
+ "loss": 1.3556,
+ "step": 376
+ },
+ {
+ "epoch": 0.37710606107967864,
+ "grad_norm": 0.5623495578765869,
+ "learning_rate": 0.00019229239479201876,
+ "loss": 1.3859,
+ "step": 377
+ },
+ {
+ "epoch": 0.3781063424088025,
+ "grad_norm": 0.602118968963623,
+ "learning_rate": 0.0001922519884832469,
+ "loss": 1.334,
+ "step": 378
+ },
+ {
+ "epoch": 0.3791066237379263,
+ "grad_norm": 0.5212380886077881,
+ "learning_rate": 0.0001922114808062234,
+ "loss": 1.401,
+ "step": 379
+ },
+ {
+ "epoch": 0.3801069050670501,
+ "grad_norm": 0.4969455599784851,
+ "learning_rate": 0.00019217087180545893,
+ "loss": 1.2292,
+ "step": 380
+ },
+ {
+ "epoch": 0.3811071863961739,
+ "grad_norm": 0.578629732131958,
+ "learning_rate": 0.0001921301615255754,
+ "loss": 1.5015,
+ "step": 381
+ },
+ {
+ "epoch": 0.38210746772529774,
+ "grad_norm": 0.593053936958313,
+ "learning_rate": 0.0001920893500113061,
+ "loss": 1.302,
+ "step": 382
+ },
+ {
+ "epoch": 0.3831077490544216,
+ "grad_norm": 0.5832563638687134,
+ "learning_rate": 0.00019204843730749547,
+ "loss": 1.3695,
+ "step": 383
+ },
+ {
+ "epoch": 0.38410803038354535,
+ "grad_norm": 0.5608510375022888,
+ "learning_rate": 0.00019200742345909915,
+ "loss": 1.3792,
+ "step": 384
+ },
+ {
+ "epoch": 0.3851083117126692,
+ "grad_norm": 0.5337334275245667,
+ "learning_rate": 0.00019196630851118398,
+ "loss": 1.4163,
+ "step": 385
+ },
+ {
+ "epoch": 0.386108593041793,
+ "grad_norm": 0.5460125803947449,
+ "learning_rate": 0.0001919250925089278,
+ "loss": 1.2439,
+ "step": 386
+ },
+ {
+ "epoch": 0.38710887437091684,
+ "grad_norm": 0.6217851638793945,
+ "learning_rate": 0.00019188377549761963,
+ "loss": 1.6428,
+ "step": 387
+ },
+ {
+ "epoch": 0.3881091557000406,
+ "grad_norm": 0.7154502868652344,
+ "learning_rate": 0.00019184235752265928,
+ "loss": 1.3468,
+ "step": 388
+ },
+ {
+ "epoch": 0.38910943702916445,
+ "grad_norm": 0.5044635534286499,
+ "learning_rate": 0.00019180083862955772,
+ "loss": 1.1877,
+ "step": 389
+ },
+ {
+ "epoch": 0.3901097183582883,
+ "grad_norm": 0.5755971074104309,
+ "learning_rate": 0.00019175921886393666,
+ "loss": 1.3475,
+ "step": 390
+ },
+ {
+ "epoch": 0.39110999968741206,
+ "grad_norm": 0.6121137738227844,
+ "learning_rate": 0.00019171749827152869,
+ "loss": 1.4342,
+ "step": 391
+ },
+ {
+ "epoch": 0.3921102810165359,
+ "grad_norm": 0.5615536570549011,
+ "learning_rate": 0.0001916756768981772,
+ "loss": 1.5471,
+ "step": 392
+ },
+ {
+ "epoch": 0.3931105623456597,
+ "grad_norm": 0.6527026295661926,
+ "learning_rate": 0.00019163375478983632,
+ "loss": 1.6363,
+ "step": 393
+ },
+ {
+ "epoch": 0.39411084367478355,
+ "grad_norm": 0.6465044617652893,
+ "learning_rate": 0.00019159173199257085,
+ "loss": 1.3823,
+ "step": 394
+ },
+ {
+ "epoch": 0.3951111250039073,
+ "grad_norm": 0.5620000958442688,
+ "learning_rate": 0.00019154960855255628,
+ "loss": 1.5418,
+ "step": 395
+ },
+ {
+ "epoch": 0.39611140633303116,
+ "grad_norm": 0.7090588808059692,
+ "learning_rate": 0.0001915073845160786,
+ "loss": 1.4593,
+ "step": 396
+ },
+ {
+ "epoch": 0.397111687662155,
+ "grad_norm": 0.6644489169120789,
+ "learning_rate": 0.00019146505992953446,
+ "loss": 1.4236,
+ "step": 397
+ },
+ {
+ "epoch": 0.3981119689912788,
+ "grad_norm": 0.6038135886192322,
+ "learning_rate": 0.00019142263483943085,
+ "loss": 1.1805,
+ "step": 398
+ },
+ {
+ "epoch": 0.3991122503204026,
+ "grad_norm": 0.6746726036071777,
+ "learning_rate": 0.00019138010929238534,
+ "loss": 1.5264,
+ "step": 399
+ },
+ {
+ "epoch": 0.4001125316495264,
+ "grad_norm": 0.5871374607086182,
+ "learning_rate": 0.00019133748333512575,
+ "loss": 1.3709,
+ "step": 400
+ },
+ {
+ "epoch": 0.40111281297865026,
+ "grad_norm": 0.5743412375450134,
+ "learning_rate": 0.00019129475701449035,
+ "loss": 1.4677,
+ "step": 401
+ },
+ {
+ "epoch": 0.4021130943077741,
+ "grad_norm": 0.6184396743774414,
+ "learning_rate": 0.0001912519303774276,
+ "loss": 1.4228,
+ "step": 402
+ },
+ {
+ "epoch": 0.40311337563689786,
+ "grad_norm": 0.5872434973716736,
+ "learning_rate": 0.0001912090034709963,
+ "loss": 1.3495,
+ "step": 403
+ },
+ {
+ "epoch": 0.4041136569660217,
+ "grad_norm": 0.6500155925750732,
+ "learning_rate": 0.00019116597634236525,
+ "loss": 1.4315,
+ "step": 404
+ },
+ {
+ "epoch": 0.4051139382951455,
+ "grad_norm": 0.5240740180015564,
+ "learning_rate": 0.0001911228490388136,
+ "loss": 1.4954,
+ "step": 405
+ },
+ {
+ "epoch": 0.4061142196242693,
+ "grad_norm": 0.5531806945800781,
+ "learning_rate": 0.00019107962160773035,
+ "loss": 1.3949,
+ "step": 406
+ },
+ {
+ "epoch": 0.40711450095339313,
+ "grad_norm": 0.5266262888908386,
+ "learning_rate": 0.0001910362940966147,
+ "loss": 1.2859,
+ "step": 407
+ },
+ {
+ "epoch": 0.40811478228251696,
+ "grad_norm": 0.5734869241714478,
+ "learning_rate": 0.00019099286655307568,
+ "loss": 1.2451,
+ "step": 408
+ },
+ {
+ "epoch": 0.4091150636116408,
+ "grad_norm": 0.5922874212265015,
+ "learning_rate": 0.0001909493390248324,
+ "loss": 1.5429,
+ "step": 409
+ },
+ {
+ "epoch": 0.41011534494076457,
+ "grad_norm": 0.542540431022644,
+ "learning_rate": 0.00019090571155971366,
+ "loss": 1.4138,
+ "step": 410
+ },
+ {
+ "epoch": 0.4111156262698884,
+ "grad_norm": 0.57356196641922,
+ "learning_rate": 0.00019086198420565823,
+ "loss": 1.2592,
+ "step": 411
+ },
+ {
+ "epoch": 0.41211590759901223,
+ "grad_norm": 0.6042733192443848,
+ "learning_rate": 0.00019081815701071445,
+ "loss": 1.5524,
+ "step": 412
+ },
+ {
+ "epoch": 0.41311618892813606,
+ "grad_norm": 0.46550241112709045,
+ "learning_rate": 0.0001907742300230406,
+ "loss": 1.308,
+ "step": 413
+ },
+ {
+ "epoch": 0.41411647025725984,
+ "grad_norm": 0.6283137798309326,
+ "learning_rate": 0.00019073020329090444,
+ "loss": 1.4753,
+ "step": 414
+ },
+ {
+ "epoch": 0.41511675158638367,
+ "grad_norm": 0.5254876613616943,
+ "learning_rate": 0.0001906860768626834,
+ "loss": 1.2157,
+ "step": 415
+ },
+ {
+ "epoch": 0.4161170329155075,
+ "grad_norm": 0.59089195728302,
+ "learning_rate": 0.00019064185078686443,
+ "loss": 1.2684,
+ "step": 416
+ },
+ {
+ "epoch": 0.4171173142446313,
+ "grad_norm": 0.7129126787185669,
+ "learning_rate": 0.000190597525112044,
+ "loss": 1.3974,
+ "step": 417
+ },
+ {
+ "epoch": 0.4181175955737551,
+ "grad_norm": 0.607305109500885,
+ "learning_rate": 0.000190553099886928,
+ "loss": 1.4312,
+ "step": 418
+ },
+ {
+ "epoch": 0.41911787690287894,
+ "grad_norm": 0.49921515583992004,
+ "learning_rate": 0.00019050857516033173,
+ "loss": 1.3469,
+ "step": 419
+ },
+ {
+ "epoch": 0.42011815823200277,
+ "grad_norm": 0.6167325377464294,
+ "learning_rate": 0.00019046395098117983,
+ "loss": 1.4723,
+ "step": 420
+ },
+ {
+ "epoch": 0.42111843956112655,
+ "grad_norm": 0.6144593358039856,
+ "learning_rate": 0.00019041922739850616,
+ "loss": 1.5502,
+ "step": 421
+ },
+ {
+ "epoch": 0.4221187208902504,
+ "grad_norm": 0.61333167552948,
+ "learning_rate": 0.00019037440446145385,
+ "loss": 1.3283,
+ "step": 422
+ },
+ {
+ "epoch": 0.4231190022193742,
+ "grad_norm": 0.5881702303886414,
+ "learning_rate": 0.00019032948221927524,
+ "loss": 1.4206,
+ "step": 423
+ },
+ {
+ "epoch": 0.42411928354849804,
+ "grad_norm": 0.5334322452545166,
+ "learning_rate": 0.00019028446072133175,
+ "loss": 1.4603,
+ "step": 424
+ },
+ {
+ "epoch": 0.4251195648776218,
+ "grad_norm": 0.5730605721473694,
+ "learning_rate": 0.00019023934001709383,
+ "loss": 1.4375,
+ "step": 425
+ },
+ {
+ "epoch": 0.42611984620674564,
+ "grad_norm": 0.6227820515632629,
+ "learning_rate": 0.00019019412015614098,
+ "loss": 1.4888,
+ "step": 426
+ },
+ {
+ "epoch": 0.4271201275358695,
+ "grad_norm": 0.5811313390731812,
+ "learning_rate": 0.00019014880118816164,
+ "loss": 1.3492,
+ "step": 427
+ },
+ {
+ "epoch": 0.4281204088649933,
+ "grad_norm": 0.5685800313949585,
+ "learning_rate": 0.0001901033831629532,
+ "loss": 1.5052,
+ "step": 428
+ },
+ {
+ "epoch": 0.4291206901941171,
+ "grad_norm": 0.5961394309997559,
+ "learning_rate": 0.00019005786613042185,
+ "loss": 1.3324,
+ "step": 429
+ },
+ {
+ "epoch": 0.4301209715232409,
+ "grad_norm": 0.5845314860343933,
+ "learning_rate": 0.00019001225014058255,
+ "loss": 1.5733,
+ "step": 430
+ },
+ {
+ "epoch": 0.43112125285236474,
+ "grad_norm": 0.5400176048278809,
+ "learning_rate": 0.00018996653524355902,
+ "loss": 1.3973,
+ "step": 431
+ },
+ {
+ "epoch": 0.4321215341814885,
+ "grad_norm": 0.5462201833724976,
+ "learning_rate": 0.00018992072148958368,
+ "loss": 1.2167,
+ "step": 432
+ },
+ {
+ "epoch": 0.43312181551061235,
+ "grad_norm": 0.6200360059738159,
+ "learning_rate": 0.00018987480892899758,
+ "loss": 1.5596,
+ "step": 433
+ },
+ {
+ "epoch": 0.4341220968397362,
+ "grad_norm": 0.5230718851089478,
+ "learning_rate": 0.00018982879761225027,
+ "loss": 1.3661,
+ "step": 434
+ },
+ {
+ "epoch": 0.43512237816886,
+ "grad_norm": 0.5868643522262573,
+ "learning_rate": 0.00018978268758989991,
+ "loss": 1.4792,
+ "step": 435
+ },
+ {
+ "epoch": 0.4361226594979838,
+ "grad_norm": 0.580892026424408,
+ "learning_rate": 0.00018973647891261307,
+ "loss": 1.3275,
+ "step": 436
+ },
+ {
+ "epoch": 0.4371229408271076,
+ "grad_norm": 0.5903263688087463,
+ "learning_rate": 0.00018969017163116472,
+ "loss": 1.4721,
+ "step": 437
+ },
+ {
+ "epoch": 0.43812322215623145,
+ "grad_norm": 0.5108968019485474,
+ "learning_rate": 0.0001896437657964382,
+ "loss": 1.3785,
+ "step": 438
+ },
+ {
+ "epoch": 0.4391235034853553,
+ "grad_norm": 0.6707500219345093,
+ "learning_rate": 0.00018959726145942508,
+ "loss": 1.5033,
+ "step": 439
+ },
+ {
+ "epoch": 0.44012378481447906,
+ "grad_norm": 0.5793184638023376,
+ "learning_rate": 0.00018955065867122528,
+ "loss": 1.3629,
+ "step": 440
+ },
+ {
+ "epoch": 0.4411240661436029,
+ "grad_norm": 0.5549041628837585,
+ "learning_rate": 0.00018950395748304678,
+ "loss": 1.5557,
+ "step": 441
+ },
+ {
+ "epoch": 0.4421243474727267,
+ "grad_norm": 0.5406919121742249,
+ "learning_rate": 0.0001894571579462058,
+ "loss": 1.4441,
+ "step": 442
+ },
+ {
+ "epoch": 0.4431246288018505,
+ "grad_norm": 0.5131089091300964,
+ "learning_rate": 0.00018941026011212654,
+ "loss": 1.3051,
+ "step": 443
+ },
+ {
+ "epoch": 0.4441249101309743,
+ "grad_norm": 0.601586639881134,
+ "learning_rate": 0.00018936326403234125,
+ "loss": 1.5297,
+ "step": 444
+ },
+ {
+ "epoch": 0.44512519146009816,
+ "grad_norm": 0.5036457180976868,
+ "learning_rate": 0.00018931616975849006,
+ "loss": 1.357,
+ "step": 445
+ },
+ {
+ "epoch": 0.446125472789222,
+ "grad_norm": 0.5471266508102417,
+ "learning_rate": 0.00018926897734232115,
+ "loss": 1.2176,
+ "step": 446
+ },
+ {
+ "epoch": 0.44712575411834576,
+ "grad_norm": 0.6057867407798767,
+ "learning_rate": 0.0001892216868356904,
+ "loss": 1.4763,
+ "step": 447
+ },
+ {
+ "epoch": 0.4481260354474696,
+ "grad_norm": 0.5384593605995178,
+ "learning_rate": 0.0001891742982905615,
+ "loss": 1.513,
+ "step": 448
+ },
+ {
+ "epoch": 0.4491263167765934,
+ "grad_norm": 0.6144880056381226,
+ "learning_rate": 0.00018912681175900598,
+ "loss": 1.5782,
+ "step": 449
+ },
+ {
+ "epoch": 0.45012659810571726,
+ "grad_norm": 0.4838174879550934,
+ "learning_rate": 0.00018907922729320285,
+ "loss": 1.4085,
+ "step": 450
+ },
+ {
+ "epoch": 0.45112687943484103,
+ "grad_norm": 0.6852928400039673,
+ "learning_rate": 0.00018903154494543889,
+ "loss": 1.5989,
+ "step": 451
+ },
+ {
+ "epoch": 0.45212716076396486,
+ "grad_norm": 0.47527411580085754,
+ "learning_rate": 0.00018898376476810834,
+ "loss": 1.3409,
+ "step": 452
+ },
+ {
+ "epoch": 0.4531274420930887,
+ "grad_norm": 0.5665884613990784,
+ "learning_rate": 0.00018893588681371303,
+ "loss": 1.5395,
+ "step": 453
+ },
+ {
+ "epoch": 0.45412772342221247,
+ "grad_norm": 0.5792158246040344,
+ "learning_rate": 0.00018888791113486213,
+ "loss": 1.516,
+ "step": 454
+ },
+ {
+ "epoch": 0.4551280047513363,
+ "grad_norm": 0.5223523378372192,
+ "learning_rate": 0.00018883983778427227,
+ "loss": 1.3678,
+ "step": 455
+ },
+ {
+ "epoch": 0.45612828608046013,
+ "grad_norm": 0.5927590131759644,
+ "learning_rate": 0.0001887916668147673,
+ "loss": 1.3617,
+ "step": 456
+ },
+ {
+ "epoch": 0.45712856740958396,
+ "grad_norm": 0.7266496419906616,
+ "learning_rate": 0.00018874339827927846,
+ "loss": 1.3734,
+ "step": 457
+ },
+ {
+ "epoch": 0.45812884873870774,
+ "grad_norm": 0.6495805978775024,
+ "learning_rate": 0.00018869503223084414,
+ "loss": 1.5282,
+ "step": 458
+ },
+ {
+ "epoch": 0.45912913006783157,
+ "grad_norm": 0.6099816560745239,
+ "learning_rate": 0.00018864656872260985,
+ "loss": 1.4691,
+ "step": 459
+ },
+ {
+ "epoch": 0.4601294113969554,
+ "grad_norm": 0.5208227038383484,
+ "learning_rate": 0.00018859800780782828,
+ "loss": 1.3949,
+ "step": 460
+ },
+ {
+ "epoch": 0.46112969272607923,
+ "grad_norm": 0.5526600480079651,
+ "learning_rate": 0.000188549349539859,
+ "loss": 1.3557,
+ "step": 461
+ },
+ {
+ "epoch": 0.462129974055203,
+ "grad_norm": 0.5537740588188171,
+ "learning_rate": 0.00018850059397216876,
+ "loss": 1.4703,
+ "step": 462
+ },
+ {
+ "epoch": 0.46313025538432684,
+ "grad_norm": 0.5553976893424988,
+ "learning_rate": 0.00018845174115833099,
+ "loss": 1.4356,
+ "step": 463
+ },
+ {
+ "epoch": 0.46413053671345067,
+ "grad_norm": 0.6027779579162598,
+ "learning_rate": 0.0001884027911520262,
+ "loss": 1.4763,
+ "step": 464
+ },
+ {
+ "epoch": 0.4651308180425745,
+ "grad_norm": 0.5559154748916626,
+ "learning_rate": 0.00018835374400704154,
+ "loss": 1.4148,
+ "step": 465
+ },
+ {
+ "epoch": 0.4661310993716983,
+ "grad_norm": 0.6124109029769897,
+ "learning_rate": 0.00018830459977727096,
+ "loss": 1.4468,
+ "step": 466
+ },
+ {
+ "epoch": 0.4671313807008221,
+ "grad_norm": 0.4762580692768097,
+ "learning_rate": 0.0001882553585167151,
+ "loss": 1.3714,
+ "step": 467
+ },
+ {
+ "epoch": 0.46813166202994594,
+ "grad_norm": 0.5793487429618835,
+ "learning_rate": 0.00018820602027948114,
+ "loss": 1.4828,
+ "step": 468
+ },
+ {
+ "epoch": 0.4691319433590697,
+ "grad_norm": 0.55177241563797,
+ "learning_rate": 0.00018815658511978298,
+ "loss": 1.4157,
+ "step": 469
+ },
+ {
+ "epoch": 0.47013222468819355,
+ "grad_norm": 0.5065292716026306,
+ "learning_rate": 0.00018810705309194083,
+ "loss": 1.4519,
+ "step": 470
+ },
+ {
+ "epoch": 0.4711325060173174,
+ "grad_norm": 0.5401413440704346,
+ "learning_rate": 0.00018805742425038145,
+ "loss": 1.4344,
+ "step": 471
+ },
+ {
+ "epoch": 0.4721327873464412,
+ "grad_norm": 0.7173880338668823,
+ "learning_rate": 0.00018800769864963802,
+ "loss": 1.7325,
+ "step": 472
+ },
+ {
+ "epoch": 0.473133068675565,
+ "grad_norm": 0.507682204246521,
+ "learning_rate": 0.00018795787634434994,
+ "loss": 1.37,
+ "step": 473
+ },
+ {
+ "epoch": 0.4741333500046888,
+ "grad_norm": 0.551888644695282,
+ "learning_rate": 0.0001879079573892629,
+ "loss": 1.3695,
+ "step": 474
+ },
+ {
+ "epoch": 0.47513363133381264,
+ "grad_norm": 0.5109260082244873,
+ "learning_rate": 0.00018785794183922883,
+ "loss": 1.4001,
+ "step": 475
+ },
+ {
+ "epoch": 0.4761339126629365,
+ "grad_norm": 0.4565551280975342,
+ "learning_rate": 0.00018780782974920572,
+ "loss": 1.1752,
+ "step": 476
+ },
+ {
+ "epoch": 0.47713419399206025,
+ "grad_norm": 0.5651509761810303,
+ "learning_rate": 0.00018775762117425777,
+ "loss": 1.4291,
+ "step": 477
+ },
+ {
+ "epoch": 0.4781344753211841,
+ "grad_norm": 0.5827792286872864,
+ "learning_rate": 0.0001877073161695551,
+ "loss": 1.3438,
+ "step": 478
+ },
+ {
+ "epoch": 0.4791347566503079,
+ "grad_norm": 0.5719752907752991,
+ "learning_rate": 0.00018765691479037376,
+ "loss": 1.4683,
+ "step": 479
+ },
+ {
+ "epoch": 0.4801350379794317,
+ "grad_norm": 0.5153111815452576,
+ "learning_rate": 0.00018760641709209583,
+ "loss": 1.4392,
+ "step": 480
+ },
+ {
+ "epoch": 0.4811353193085555,
+ "grad_norm": 0.5455904603004456,
+ "learning_rate": 0.0001875558231302091,
+ "loss": 1.1603,
+ "step": 481
+ },
+ {
+ "epoch": 0.48213560063767935,
+ "grad_norm": 0.5857074856758118,
+ "learning_rate": 0.00018750513296030718,
+ "loss": 1.3099,
+ "step": 482
+ },
+ {
+ "epoch": 0.4831358819668032,
+ "grad_norm": 0.6051676273345947,
+ "learning_rate": 0.00018745434663808942,
+ "loss": 1.3587,
+ "step": 483
+ },
+ {
+ "epoch": 0.48413616329592696,
+ "grad_norm": 0.588749885559082,
+ "learning_rate": 0.0001874034642193608,
+ "loss": 1.5277,
+ "step": 484
+ },
+ {
+ "epoch": 0.4851364446250508,
+ "grad_norm": 0.5295410752296448,
+ "learning_rate": 0.0001873524857600319,
+ "loss": 1.2084,
+ "step": 485
+ },
+ {
+ "epoch": 0.4861367259541746,
+ "grad_norm": 0.5313368439674377,
+ "learning_rate": 0.00018730141131611882,
+ "loss": 1.4002,
+ "step": 486
+ },
+ {
+ "epoch": 0.48713700728329845,
+ "grad_norm": 0.5166353583335876,
+ "learning_rate": 0.00018725024094374315,
+ "loss": 1.208,
+ "step": 487
+ },
+ {
+ "epoch": 0.4881372886124222,
+ "grad_norm": 0.5478363037109375,
+ "learning_rate": 0.00018719897469913184,
+ "loss": 1.3236,
+ "step": 488
+ },
+ {
+ "epoch": 0.48913756994154606,
+ "grad_norm": 0.5531913042068481,
+ "learning_rate": 0.00018714761263861728,
+ "loss": 1.4938,
+ "step": 489
+ },
+ {
+ "epoch": 0.4901378512706699,
+ "grad_norm": 0.5334530472755432,
+ "learning_rate": 0.000187096154818637,
+ "loss": 1.4172,
+ "step": 490
+ },
+ {
+ "epoch": 0.4911381325997937,
+ "grad_norm": 0.5667001605033875,
+ "learning_rate": 0.00018704460129573391,
+ "loss": 1.3517,
+ "step": 491
+ },
+ {
+ "epoch": 0.4921384139289175,
+ "grad_norm": 0.5568780303001404,
+ "learning_rate": 0.00018699295212655596,
+ "loss": 1.4287,
+ "step": 492
+ },
+ {
+ "epoch": 0.4931386952580413,
+ "grad_norm": 0.6663610935211182,
+ "learning_rate": 0.00018694120736785632,
+ "loss": 1.5416,
+ "step": 493
+ },
+ {
+ "epoch": 0.49413897658716516,
+ "grad_norm": 0.5753045082092285,
+ "learning_rate": 0.00018688936707649304,
+ "loss": 1.5552,
+ "step": 494
+ },
+ {
+ "epoch": 0.49513925791628893,
+ "grad_norm": 0.5707410573959351,
+ "learning_rate": 0.00018683743130942928,
+ "loss": 1.5332,
+ "step": 495
+ },
+ {
+ "epoch": 0.49613953924541276,
+ "grad_norm": 0.5847951173782349,
+ "learning_rate": 0.00018678540012373302,
+ "loss": 1.3488,
+ "step": 496
+ },
+ {
+ "epoch": 0.4971398205745366,
+ "grad_norm": 0.60503751039505,
+ "learning_rate": 0.00018673327357657715,
+ "loss": 1.3924,
+ "step": 497
+ },
+ {
+ "epoch": 0.4981401019036604,
+ "grad_norm": 0.635142982006073,
+ "learning_rate": 0.0001866810517252393,
+ "loss": 1.4392,
+ "step": 498
+ },
+ {
+ "epoch": 0.4991403832327842,
+ "grad_norm": 0.5536782741546631,
+ "learning_rate": 0.00018662873462710184,
+ "loss": 1.286,
+ "step": 499
+ },
+ {
+ "epoch": 0.5001406645619081,
+ "grad_norm": 0.5676659345626831,
+ "learning_rate": 0.0001865763223396518,
+ "loss": 1.3006,
+ "step": 500
+ },
+ {
+ "epoch": 0.5011409458910319,
+ "grad_norm": 0.5546663403511047,
+ "learning_rate": 0.00018652381492048083,
+ "loss": 1.418,
+ "step": 501
+ },
+ {
+ "epoch": 0.5021412272201556,
+ "grad_norm": 0.5137162804603577,
+ "learning_rate": 0.00018647121242728506,
+ "loss": 1.3173,
+ "step": 502
+ },
+ {
+ "epoch": 0.5031415085492795,
+ "grad_norm": 0.5474348068237305,
+ "learning_rate": 0.00018641851491786512,
+ "loss": 1.6652,
+ "step": 503
+ },
+ {
+ "epoch": 0.5041417898784033,
+ "grad_norm": 0.5563383102416992,
+ "learning_rate": 0.00018636572245012606,
+ "loss": 1.4519,
+ "step": 504
+ },
+ {
+ "epoch": 0.5051420712075271,
+ "grad_norm": 0.5621083974838257,
+ "learning_rate": 0.00018631283508207725,
+ "loss": 1.5418,
+ "step": 505
+ },
+ {
+ "epoch": 0.506142352536651,
+ "grad_norm": 0.49915972352027893,
+ "learning_rate": 0.00018625985287183233,
+ "loss": 1.2969,
+ "step": 506
+ },
+ {
+ "epoch": 0.5071426338657747,
+ "grad_norm": 0.601996660232544,
+ "learning_rate": 0.00018620677587760916,
+ "loss": 1.4483,
+ "step": 507
+ },
+ {
+ "epoch": 0.5081429151948985,
+ "grad_norm": 0.5594652891159058,
+ "learning_rate": 0.00018615360415772978,
+ "loss": 1.4094,
+ "step": 508
+ },
+ {
+ "epoch": 0.5091431965240224,
+ "grad_norm": 0.557381808757782,
+ "learning_rate": 0.00018610033777062025,
+ "loss": 1.216,
+ "step": 509
+ },
+ {
+ "epoch": 0.5101434778531462,
+ "grad_norm": 0.5841740369796753,
+ "learning_rate": 0.0001860469767748108,
+ "loss": 1.4924,
+ "step": 510
+ },
+ {
+ "epoch": 0.5111437591822701,
+ "grad_norm": 0.4968324899673462,
+ "learning_rate": 0.00018599352122893539,
+ "loss": 1.2474,
+ "step": 511
+ },
+ {
+ "epoch": 0.5121440405113938,
+ "grad_norm": 0.5390318632125854,
+ "learning_rate": 0.00018593997119173205,
+ "loss": 1.4484,
+ "step": 512
+ },
+ {
+ "epoch": 0.5131443218405176,
+ "grad_norm": 0.6626128554344177,
+ "learning_rate": 0.00018588632672204264,
+ "loss": 1.5664,
+ "step": 513
+ },
+ {
+ "epoch": 0.5141446031696415,
+ "grad_norm": 0.6183133721351624,
+ "learning_rate": 0.0001858325878788126,
+ "loss": 1.5603,
+ "step": 514
+ },
+ {
+ "epoch": 0.5151448844987653,
+ "grad_norm": 0.5574773550033569,
+ "learning_rate": 0.00018577875472109134,
+ "loss": 1.3668,
+ "step": 515
+ },
+ {
+ "epoch": 0.516145165827889,
+ "grad_norm": 0.5127518773078918,
+ "learning_rate": 0.0001857248273080317,
+ "loss": 1.264,
+ "step": 516
+ },
+ {
+ "epoch": 0.5171454471570129,
+ "grad_norm": 0.6540619134902954,
+ "learning_rate": 0.00018567080569889015,
+ "loss": 1.3091,
+ "step": 517
+ },
+ {
+ "epoch": 0.5181457284861367,
+ "grad_norm": 0.5286336541175842,
+ "learning_rate": 0.00018561668995302667,
+ "loss": 1.3581,
+ "step": 518
+ },
+ {
+ "epoch": 0.5191460098152605,
+ "grad_norm": 0.6609972715377808,
+ "learning_rate": 0.00018556248012990468,
+ "loss": 1.3123,
+ "step": 519
+ },
+ {
+ "epoch": 0.5201462911443844,
+ "grad_norm": 0.48230236768722534,
+ "learning_rate": 0.000185508176289091,
+ "loss": 1.2372,
+ "step": 520
+ },
+ {
+ "epoch": 0.5211465724735082,
+ "grad_norm": 0.5173765420913696,
+ "learning_rate": 0.00018545377849025566,
+ "loss": 1.327,
+ "step": 521
+ },
+ {
+ "epoch": 0.522146853802632,
+ "grad_norm": 0.5822583436965942,
+ "learning_rate": 0.0001853992867931721,
+ "loss": 1.3851,
+ "step": 522
+ },
+ {
+ "epoch": 0.5231471351317558,
+ "grad_norm": 0.6025621891021729,
+ "learning_rate": 0.00018534470125771674,
+ "loss": 1.5627,
+ "step": 523
+ },
+ {
+ "epoch": 0.5241474164608796,
+ "grad_norm": 0.5516778230667114,
+ "learning_rate": 0.0001852900219438693,
+ "loss": 1.4036,
+ "step": 524
+ },
+ {
+ "epoch": 0.5251476977900035,
+ "grad_norm": 0.5738380551338196,
+ "learning_rate": 0.0001852352489117124,
+ "loss": 1.5042,
+ "step": 525
+ },
+ {
+ "epoch": 0.5261479791191273,
+ "grad_norm": 0.6360776424407959,
+ "learning_rate": 0.00018518038222143174,
+ "loss": 1.4101,
+ "step": 526
+ },
+ {
+ "epoch": 0.527148260448251,
+ "grad_norm": 0.5776675939559937,
+ "learning_rate": 0.00018512542193331583,
+ "loss": 1.6015,
+ "step": 527
+ },
+ {
+ "epoch": 0.5281485417773749,
+ "grad_norm": 0.5662726759910583,
+ "learning_rate": 0.00018507036810775615,
+ "loss": 1.3186,
+ "step": 528
+ },
+ {
+ "epoch": 0.5291488231064987,
+ "grad_norm": 0.6518335938453674,
+ "learning_rate": 0.00018501522080524688,
+ "loss": 1.4882,
+ "step": 529
+ },
+ {
+ "epoch": 0.5301491044356225,
+ "grad_norm": 0.5475590825080872,
+ "learning_rate": 0.0001849599800863849,
+ "loss": 1.487,
+ "step": 530
+ },
+ {
+ "epoch": 0.5311493857647464,
+ "grad_norm": 0.6275209188461304,
+ "learning_rate": 0.0001849046460118698,
+ "loss": 1.3563,
+ "step": 531
+ },
+ {
+ "epoch": 0.5321496670938701,
+ "grad_norm": 0.5629132390022278,
+ "learning_rate": 0.0001848492186425037,
+ "loss": 1.516,
+ "step": 532
+ },
+ {
+ "epoch": 0.533149948422994,
+ "grad_norm": 0.5251057744026184,
+ "learning_rate": 0.0001847936980391913,
+ "loss": 1.5254,
+ "step": 533
+ },
+ {
+ "epoch": 0.5341502297521178,
+ "grad_norm": 0.5635396838188171,
+ "learning_rate": 0.00018473808426293964,
+ "loss": 1.3408,
+ "step": 534
+ },
+ {
+ "epoch": 0.5351505110812416,
+ "grad_norm": 0.527082622051239,
+ "learning_rate": 0.00018468237737485823,
+ "loss": 1.2664,
+ "step": 535
+ },
+ {
+ "epoch": 0.5361507924103655,
+ "grad_norm": 0.6555044054985046,
+ "learning_rate": 0.00018462657743615888,
+ "loss": 1.464,
+ "step": 536
+ },
+ {
+ "epoch": 0.5371510737394892,
+ "grad_norm": 0.5468676686286926,
+ "learning_rate": 0.00018457068450815562,
+ "loss": 1.3733,
+ "step": 537
+ },
+ {
+ "epoch": 0.538151355068613,
+ "grad_norm": 0.5662835836410522,
+ "learning_rate": 0.00018451469865226464,
+ "loss": 1.509,
+ "step": 538
+ },
+ {
+ "epoch": 0.5391516363977369,
+ "grad_norm": 0.5553548336029053,
+ "learning_rate": 0.00018445861993000436,
+ "loss": 1.2476,
+ "step": 539
+ },
+ {
+ "epoch": 0.5401519177268607,
+ "grad_norm": 0.6240925192832947,
+ "learning_rate": 0.00018440244840299506,
+ "loss": 1.5835,
+ "step": 540
+ },
+ {
+ "epoch": 0.5411521990559846,
+ "grad_norm": 0.6107541918754578,
+ "learning_rate": 0.0001843461841329591,
+ "loss": 1.7176,
+ "step": 541
+ },
+ {
+ "epoch": 0.5421524803851083,
+ "grad_norm": 0.6990326642990112,
+ "learning_rate": 0.0001842898271817208,
+ "loss": 1.4235,
+ "step": 542
+ },
+ {
+ "epoch": 0.5431527617142321,
+ "grad_norm": 0.583871603012085,
+ "learning_rate": 0.00018423337761120618,
+ "loss": 1.5283,
+ "step": 543
+ },
+ {
+ "epoch": 0.544153043043356,
+ "grad_norm": 0.5585455894470215,
+ "learning_rate": 0.00018417683548344318,
+ "loss": 1.4875,
+ "step": 544
+ },
+ {
+ "epoch": 0.5451533243724798,
+ "grad_norm": 0.5199955701828003,
+ "learning_rate": 0.00018412020086056133,
+ "loss": 1.3989,
+ "step": 545
+ },
+ {
+ "epoch": 0.5461536057016035,
+ "grad_norm": 0.5517343878746033,
+ "learning_rate": 0.0001840634738047918,
+ "loss": 1.4073,
+ "step": 546
+ },
+ {
+ "epoch": 0.5471538870307274,
+ "grad_norm": 0.7140716314315796,
+ "learning_rate": 0.0001840066543784675,
+ "loss": 1.4477,
+ "step": 547
+ },
+ {
+ "epoch": 0.5481541683598512,
+ "grad_norm": 0.548422634601593,
+ "learning_rate": 0.00018394974264402257,
+ "loss": 1.4198,
+ "step": 548
+ },
+ {
+ "epoch": 0.549154449688975,
+ "grad_norm": 0.5907624363899231,
+ "learning_rate": 0.00018389273866399275,
+ "loss": 1.4033,
+ "step": 549
+ },
+ {
+ "epoch": 0.5501547310180989,
+ "grad_norm": 0.5327603220939636,
+ "learning_rate": 0.00018383564250101512,
+ "loss": 1.2674,
+ "step": 550
+ },
+ {
+ "epoch": 0.5511550123472226,
+ "grad_norm": 0.4678132236003876,
+ "learning_rate": 0.000183778454217828,
+ "loss": 1.3644,
+ "step": 551
+ },
+ {
+ "epoch": 0.5521552936763465,
+ "grad_norm": 0.674040675163269,
+ "learning_rate": 0.0001837211738772711,
+ "loss": 1.6942,
+ "step": 552
+ },
+ {
+ "epoch": 0.5531555750054703,
+ "grad_norm": 0.5374539494514465,
+ "learning_rate": 0.000183663801542285,
+ "loss": 1.1887,
+ "step": 553
+ },
+ {
+ "epoch": 0.5541558563345941,
+ "grad_norm": 0.5528072118759155,
+ "learning_rate": 0.00018360633727591155,
+ "loss": 1.2,
+ "step": 554
+ },
+ {
+ "epoch": 0.555156137663718,
+ "grad_norm": 0.6597411632537842,
+ "learning_rate": 0.00018354878114129367,
+ "loss": 1.402,
+ "step": 555
+ },
+ {
+ "epoch": 0.5561564189928417,
+ "grad_norm": 0.5931501388549805,
+ "learning_rate": 0.00018349113320167504,
+ "loss": 1.5583,
+ "step": 556
+ },
+ {
+ "epoch": 0.5571567003219655,
+ "grad_norm": 0.6331121921539307,
+ "learning_rate": 0.00018343339352040042,
+ "loss": 1.7882,
+ "step": 557
+ },
+ {
+ "epoch": 0.5581569816510894,
+ "grad_norm": 0.5221824645996094,
+ "learning_rate": 0.00018337556216091517,
+ "loss": 1.2457,
+ "step": 558
+ },
+ {
+ "epoch": 0.5591572629802132,
+ "grad_norm": 0.6008853912353516,
+ "learning_rate": 0.00018331763918676556,
+ "loss": 1.5916,
+ "step": 559
+ },
+ {
+ "epoch": 0.560157544309337,
+ "grad_norm": 0.5409006476402283,
+ "learning_rate": 0.00018325962466159848,
+ "loss": 1.3457,
+ "step": 560
+ },
+ {
+ "epoch": 0.5611578256384608,
+ "grad_norm": 0.5095859169960022,
+ "learning_rate": 0.00018320151864916135,
+ "loss": 1.3622,
+ "step": 561
+ },
+ {
+ "epoch": 0.5621581069675846,
+ "grad_norm": 0.5716331005096436,
+ "learning_rate": 0.00018314332121330225,
+ "loss": 1.6168,
+ "step": 562
+ },
+ {
+ "epoch": 0.5631583882967085,
+ "grad_norm": 0.600307047367096,
+ "learning_rate": 0.0001830850324179695,
+ "loss": 1.4117,
+ "step": 563
+ },
+ {
+ "epoch": 0.5641586696258323,
+ "grad_norm": 0.7528484463691711,
+ "learning_rate": 0.00018302665232721208,
+ "loss": 1.3418,
+ "step": 564
+ },
+ {
+ "epoch": 0.565158950954956,
+ "grad_norm": 0.6119087338447571,
+ "learning_rate": 0.0001829681810051791,
+ "loss": 1.4908,
+ "step": 565
+ },
+ {
+ "epoch": 0.5661592322840799,
+ "grad_norm": 0.6440190672874451,
+ "learning_rate": 0.00018290961851611995,
+ "loss": 1.3511,
+ "step": 566
+ },
+ {
+ "epoch": 0.5671595136132037,
+ "grad_norm": 0.647294282913208,
+ "learning_rate": 0.00018285096492438424,
+ "loss": 1.5165,
+ "step": 567
+ },
+ {
+ "epoch": 0.5681597949423275,
+ "grad_norm": 0.5499668717384338,
+ "learning_rate": 0.00018279222029442163,
+ "loss": 1.2876,
+ "step": 568
+ },
+ {
+ "epoch": 0.5691600762714514,
+ "grad_norm": 0.5629482865333557,
+ "learning_rate": 0.00018273338469078186,
+ "loss": 1.2256,
+ "step": 569
+ },
+ {
+ "epoch": 0.5701603576005752,
+ "grad_norm": 0.48661288619041443,
+ "learning_rate": 0.00018267445817811466,
+ "loss": 1.44,
+ "step": 570
+ },
+ {
+ "epoch": 0.5711606389296989,
+ "grad_norm": 0.5713567733764648,
+ "learning_rate": 0.00018261544082116954,
+ "loss": 1.741,
+ "step": 571
+ },
+ {
+ "epoch": 0.5721609202588228,
+ "grad_norm": 0.6130850315093994,
+ "learning_rate": 0.00018255633268479595,
+ "loss": 1.526,
+ "step": 572
+ },
+ {
+ "epoch": 0.5731612015879466,
+ "grad_norm": 0.5415536761283875,
+ "learning_rate": 0.00018249713383394303,
+ "loss": 1.2405,
+ "step": 573
+ },
+ {
+ "epoch": 0.5741614829170705,
+ "grad_norm": 0.600574791431427,
+ "learning_rate": 0.0001824378443336596,
+ "loss": 1.4534,
+ "step": 574
+ },
+ {
+ "epoch": 0.5751617642461943,
+ "grad_norm": 0.5479387044906616,
+ "learning_rate": 0.00018237846424909413,
+ "loss": 1.4277,
+ "step": 575
+ },
+ {
+ "epoch": 0.576162045575318,
+ "grad_norm": 0.5536132454872131,
+ "learning_rate": 0.00018231899364549455,
+ "loss": 1.3918,
+ "step": 576
+ },
+ {
+ "epoch": 0.5771623269044419,
+ "grad_norm": 0.6228598356246948,
+ "learning_rate": 0.00018225943258820833,
+ "loss": 1.413,
+ "step": 577
+ },
+ {
+ "epoch": 0.5781626082335657,
+ "grad_norm": 0.5498123168945312,
+ "learning_rate": 0.00018219978114268227,
+ "loss": 1.3558,
+ "step": 578
+ },
+ {
+ "epoch": 0.5791628895626895,
+ "grad_norm": 0.5427498817443848,
+ "learning_rate": 0.00018214003937446253,
+ "loss": 1.509,
+ "step": 579
+ },
+ {
+ "epoch": 0.5801631708918134,
+ "grad_norm": 0.522285521030426,
+ "learning_rate": 0.00018208020734919455,
+ "loss": 1.3847,
+ "step": 580
+ },
+ {
+ "epoch": 0.5811634522209371,
+ "grad_norm": 0.5963860750198364,
+ "learning_rate": 0.00018202028513262288,
+ "loss": 1.4605,
+ "step": 581
+ },
+ {
+ "epoch": 0.5821637335500609,
+ "grad_norm": 0.4854499101638794,
+ "learning_rate": 0.00018196027279059117,
+ "loss": 1.4968,
+ "step": 582
+ },
+ {
+ "epoch": 0.5831640148791848,
+ "grad_norm": 0.503466010093689,
+ "learning_rate": 0.00018190017038904215,
+ "loss": 1.2568,
+ "step": 583
+ },
+ {
+ "epoch": 0.5841642962083086,
+ "grad_norm": 0.6027483940124512,
+ "learning_rate": 0.0001818399779940175,
+ "loss": 1.5744,
+ "step": 584
+ },
+ {
+ "epoch": 0.5851645775374325,
+ "grad_norm": 0.5450258851051331,
+ "learning_rate": 0.0001817796956716578,
+ "loss": 1.2672,
+ "step": 585
+ },
+ {
+ "epoch": 0.5861648588665562,
+ "grad_norm": 0.5376724600791931,
+ "learning_rate": 0.00018171932348820234,
+ "loss": 1.5099,
+ "step": 586
+ },
+ {
+ "epoch": 0.58716514019568,
+ "grad_norm": 0.513921856880188,
+ "learning_rate": 0.0001816588615099893,
+ "loss": 1.3213,
+ "step": 587
+ },
+ {
+ "epoch": 0.5881654215248039,
+ "grad_norm": 0.7540159225463867,
+ "learning_rate": 0.00018159830980345548,
+ "loss": 1.2231,
+ "step": 588
+ },
+ {
+ "epoch": 0.5891657028539277,
+ "grad_norm": 0.5917702317237854,
+ "learning_rate": 0.0001815376684351362,
+ "loss": 1.6094,
+ "step": 589
+ },
+ {
+ "epoch": 0.5901659841830514,
+ "grad_norm": 0.5507463216781616,
+ "learning_rate": 0.00018147693747166534,
+ "loss": 1.3904,
+ "step": 590
+ },
+ {
+ "epoch": 0.5911662655121753,
+ "grad_norm": 0.545695960521698,
+ "learning_rate": 0.00018141611697977529,
+ "loss": 1.5172,
+ "step": 591
+ },
+ {
+ "epoch": 0.5921665468412991,
+ "grad_norm": 0.5876530408859253,
+ "learning_rate": 0.00018135520702629675,
+ "loss": 1.3676,
+ "step": 592
+ },
+ {
+ "epoch": 0.5931668281704229,
+ "grad_norm": 0.5510894060134888,
+ "learning_rate": 0.0001812942076781588,
+ "loss": 1.4379,
+ "step": 593
+ },
+ {
+ "epoch": 0.5941671094995468,
+ "grad_norm": 0.5105913877487183,
+ "learning_rate": 0.0001812331190023886,
+ "loss": 1.3687,
+ "step": 594
+ },
+ {
+ "epoch": 0.5951673908286705,
+ "grad_norm": 0.47876060009002686,
+ "learning_rate": 0.0001811719410661116,
+ "loss": 1.3178,
+ "step": 595
+ },
+ {
+ "epoch": 0.5961676721577944,
+ "grad_norm": 0.6079074144363403,
+ "learning_rate": 0.00018111067393655132,
+ "loss": 1.4713,
+ "step": 596
+ },
+ {
+ "epoch": 0.5971679534869182,
+ "grad_norm": 0.5363487601280212,
+ "learning_rate": 0.0001810493176810292,
+ "loss": 1.1868,
+ "step": 597
+ },
+ {
+ "epoch": 0.598168234816042,
+ "grad_norm": 0.5252292156219482,
+ "learning_rate": 0.00018098787236696474,
+ "loss": 1.303,
+ "step": 598
+ },
+ {
+ "epoch": 0.5991685161451659,
+ "grad_norm": 0.5377137064933777,
+ "learning_rate": 0.00018092633806187513,
+ "loss": 1.3653,
+ "step": 599
+ },
+ {
+ "epoch": 0.6001687974742896,
+ "grad_norm": 0.5274302363395691,
+ "learning_rate": 0.0001808647148333755,
+ "loss": 1.3693,
+ "step": 600
+ },
+ {
+ "epoch": 0.6011690788034134,
+ "grad_norm": 0.5664658546447754,
+ "learning_rate": 0.00018080300274917862,
+ "loss": 1.3807,
+ "step": 601
+ },
+ {
+ "epoch": 0.6021693601325373,
+ "grad_norm": 0.6609538197517395,
+ "learning_rate": 0.00018074120187709495,
+ "loss": 1.5015,
+ "step": 602
+ },
+ {
+ "epoch": 0.6031696414616611,
+ "grad_norm": 0.4943195879459381,
+ "learning_rate": 0.00018067931228503246,
+ "loss": 1.4436,
+ "step": 603
+ },
+ {
+ "epoch": 0.604169922790785,
+ "grad_norm": 0.549712598323822,
+ "learning_rate": 0.00018061733404099655,
+ "loss": 1.455,
+ "step": 604
+ },
+ {
+ "epoch": 0.6051702041199087,
+ "grad_norm": 0.5765941143035889,
+ "learning_rate": 0.00018055526721309016,
+ "loss": 1.3317,
+ "step": 605
+ },
+ {
+ "epoch": 0.6061704854490325,
+ "grad_norm": 0.5223068594932556,
+ "learning_rate": 0.0001804931118695135,
+ "loss": 1.3456,
+ "step": 606
+ },
+ {
+ "epoch": 0.6071707667781564,
+ "grad_norm": 0.5385129451751709,
+ "learning_rate": 0.00018043086807856403,
+ "loss": 1.3388,
+ "step": 607
+ },
+ {
+ "epoch": 0.6081710481072802,
+ "grad_norm": 0.5244528651237488,
+ "learning_rate": 0.00018036853590863648,
+ "loss": 1.398,
+ "step": 608
+ },
+ {
+ "epoch": 0.609171329436404,
+ "grad_norm": 0.5274112224578857,
+ "learning_rate": 0.00018030611542822257,
+ "loss": 1.3105,
+ "step": 609
+ },
+ {
+ "epoch": 0.6101716107655278,
+ "grad_norm": 0.5351893305778503,
+ "learning_rate": 0.00018024360670591114,
+ "loss": 1.3128,
+ "step": 610
+ },
+ {
+ "epoch": 0.6111718920946516,
+ "grad_norm": 0.5729460120201111,
+ "learning_rate": 0.00018018100981038798,
+ "loss": 1.3606,
+ "step": 611
+ },
+ {
+ "epoch": 0.6121721734237754,
+ "grad_norm": 0.5494408011436462,
+ "learning_rate": 0.00018011832481043576,
+ "loss": 1.4517,
+ "step": 612
+ },
+ {
+ "epoch": 0.6131724547528993,
+ "grad_norm": 0.5205882787704468,
+ "learning_rate": 0.00018005555177493394,
+ "loss": 1.4943,
+ "step": 613
+ },
+ {
+ "epoch": 0.614172736082023,
+ "grad_norm": 0.5488479137420654,
+ "learning_rate": 0.00017999269077285875,
+ "loss": 1.3939,
+ "step": 614
+ },
+ {
+ "epoch": 0.6151730174111469,
+ "grad_norm": 0.5779786109924316,
+ "learning_rate": 0.00017992974187328305,
+ "loss": 1.5744,
+ "step": 615
+ },
+ {
+ "epoch": 0.6161732987402707,
+ "grad_norm": 0.5576769113540649,
+ "learning_rate": 0.00017986670514537627,
+ "loss": 1.2284,
+ "step": 616
+ },
+ {
+ "epoch": 0.6171735800693945,
+ "grad_norm": 0.4912784993648529,
+ "learning_rate": 0.00017980358065840444,
+ "loss": 1.292,
+ "step": 617
+ },
+ {
+ "epoch": 0.6181738613985184,
+ "grad_norm": 0.657666027545929,
+ "learning_rate": 0.0001797403684817299,
+ "loss": 1.4918,
+ "step": 618
+ },
+ {
+ "epoch": 0.6191741427276422,
+ "grad_norm": 0.5642833113670349,
+ "learning_rate": 0.00017967706868481144,
+ "loss": 1.4718,
+ "step": 619
+ },
+ {
+ "epoch": 0.6201744240567659,
+ "grad_norm": 0.7243106961250305,
+ "learning_rate": 0.00017961368133720407,
+ "loss": 1.4342,
+ "step": 620
+ },
+ {
+ "epoch": 0.6211747053858898,
+ "grad_norm": 0.4982456564903259,
+ "learning_rate": 0.000179550206508559,
+ "loss": 1.4478,
+ "step": 621
+ },
+ {
+ "epoch": 0.6221749867150136,
+ "grad_norm": 0.5249592065811157,
+ "learning_rate": 0.00017948664426862364,
+ "loss": 1.485,
+ "step": 622
+ },
+ {
+ "epoch": 0.6231752680441374,
+ "grad_norm": 0.6167681217193604,
+ "learning_rate": 0.00017942299468724134,
+ "loss": 1.4813,
+ "step": 623
+ },
+ {
+ "epoch": 0.6241755493732613,
+ "grad_norm": 0.5300460457801819,
+ "learning_rate": 0.0001793592578343515,
+ "loss": 1.1364,
+ "step": 624
+ },
+ {
+ "epoch": 0.625175830702385,
+ "grad_norm": 0.5908417105674744,
+ "learning_rate": 0.0001792954337799894,
+ "loss": 1.4402,
+ "step": 625
+ },
+ {
+ "epoch": 0.6261761120315089,
+ "grad_norm": 0.5684035420417786,
+ "learning_rate": 0.00017923152259428612,
+ "loss": 1.4847,
+ "step": 626
+ },
+ {
+ "epoch": 0.6271763933606327,
+ "grad_norm": 0.5421493053436279,
+ "learning_rate": 0.00017916752434746856,
+ "loss": 1.3348,
+ "step": 627
+ },
+ {
+ "epoch": 0.6281766746897565,
+ "grad_norm": 0.5295160412788391,
+ "learning_rate": 0.0001791034391098591,
+ "loss": 1.4703,
+ "step": 628
+ },
+ {
+ "epoch": 0.6291769560188804,
+ "grad_norm": 0.5196051001548767,
+ "learning_rate": 0.00017903926695187595,
+ "loss": 1.3478,
+ "step": 629
+ },
+ {
+ "epoch": 0.6301772373480041,
+ "grad_norm": 0.4994469881057739,
+ "learning_rate": 0.0001789750079440326,
+ "loss": 1.2368,
+ "step": 630
+ },
+ {
+ "epoch": 0.6311775186771279,
+ "grad_norm": 0.5117055177688599,
+ "learning_rate": 0.00017891066215693817,
+ "loss": 1.3429,
+ "step": 631
+ },
+ {
+ "epoch": 0.6321778000062518,
+ "grad_norm": 0.49438026547431946,
+ "learning_rate": 0.00017884622966129695,
+ "loss": 1.301,
+ "step": 632
+ },
+ {
+ "epoch": 0.6331780813353756,
+ "grad_norm": 0.6113334894180298,
+ "learning_rate": 0.00017878171052790868,
+ "loss": 1.4636,
+ "step": 633
+ },
+ {
+ "epoch": 0.6341783626644993,
+ "grad_norm": 0.6063141822814941,
+ "learning_rate": 0.00017871710482766817,
+ "loss": 1.2262,
+ "step": 634
+ },
+ {
+ "epoch": 0.6351786439936232,
+ "grad_norm": 0.5604403614997864,
+ "learning_rate": 0.00017865241263156546,
+ "loss": 1.4112,
+ "step": 635
+ },
+ {
+ "epoch": 0.636178925322747,
+ "grad_norm": 0.523415207862854,
+ "learning_rate": 0.0001785876340106855,
+ "loss": 1.3281,
+ "step": 636
+ },
+ {
+ "epoch": 0.6371792066518709,
+ "grad_norm": 0.5602991580963135,
+ "learning_rate": 0.0001785227690362083,
+ "loss": 1.44,
+ "step": 637
+ },
+ {
+ "epoch": 0.6381794879809947,
+ "grad_norm": 0.46946853399276733,
+ "learning_rate": 0.00017845781777940878,
+ "loss": 1.2956,
+ "step": 638
+ },
+ {
+ "epoch": 0.6391797693101184,
+ "grad_norm": 0.5586503744125366,
+ "learning_rate": 0.00017839278031165658,
+ "loss": 1.5419,
+ "step": 639
+ },
+ {
+ "epoch": 0.6401800506392423,
+ "grad_norm": 0.5270752310752869,
+ "learning_rate": 0.00017832765670441612,
+ "loss": 1.305,
+ "step": 640
+ },
+ {
+ "epoch": 0.6411803319683661,
+ "grad_norm": 0.57756108045578,
+ "learning_rate": 0.0001782624470292465,
+ "loss": 1.2145,
+ "step": 641
+ },
+ {
+ "epoch": 0.6421806132974899,
+ "grad_norm": 0.5709058046340942,
+ "learning_rate": 0.0001781971513578013,
+ "loss": 1.4804,
+ "step": 642
+ },
+ {
+ "epoch": 0.6431808946266138,
+ "grad_norm": 0.505849301815033,
+ "learning_rate": 0.00017813176976182873,
+ "loss": 1.3964,
+ "step": 643
+ },
+ {
+ "epoch": 0.6441811759557375,
+ "grad_norm": 0.5171617269515991,
+ "learning_rate": 0.00017806630231317127,
+ "loss": 1.3283,
+ "step": 644
+ },
+ {
+ "epoch": 0.6451814572848613,
+ "grad_norm": 0.5567512512207031,
+ "learning_rate": 0.00017800074908376584,
+ "loss": 1.481,
+ "step": 645
+ },
+ {
+ "epoch": 0.6461817386139852,
+ "grad_norm": 0.5000666379928589,
+ "learning_rate": 0.00017793511014564358,
+ "loss": 1.2856,
+ "step": 646
+ },
+ {
+ "epoch": 0.647182019943109,
+ "grad_norm": 0.49550777673721313,
+ "learning_rate": 0.00017786938557092983,
+ "loss": 1.3447,
+ "step": 647
+ },
+ {
+ "epoch": 0.6481823012722329,
+ "grad_norm": 0.5904624462127686,
+ "learning_rate": 0.00017780357543184397,
+ "loss": 1.241,
+ "step": 648
+ },
+ {
+ "epoch": 0.6491825826013566,
+ "grad_norm": 0.4615901708602905,
+ "learning_rate": 0.00017773767980069945,
+ "loss": 1.3436,
+ "step": 649
+ },
+ {
+ "epoch": 0.6501828639304804,
+ "grad_norm": 0.48083069920539856,
+ "learning_rate": 0.0001776716987499037,
+ "loss": 1.3906,
+ "step": 650
+ },
+ {
+ "epoch": 0.6511831452596043,
+ "grad_norm": 0.4525931775569916,
+ "learning_rate": 0.0001776056323519579,
+ "loss": 1.3417,
+ "step": 651
+ },
+ {
+ "epoch": 0.6521834265887281,
+ "grad_norm": 0.6179555058479309,
+ "learning_rate": 0.00017753948067945712,
+ "loss": 1.3438,
+ "step": 652
+ },
+ {
+ "epoch": 0.6531837079178519,
+ "grad_norm": 0.5525293946266174,
+ "learning_rate": 0.00017747324380509006,
+ "loss": 1.4551,
+ "step": 653
+ },
+ {
+ "epoch": 0.6541839892469757,
+ "grad_norm": 0.533028781414032,
+ "learning_rate": 0.00017740692180163908,
+ "loss": 1.4396,
+ "step": 654
+ },
+ {
+ "epoch": 0.6551842705760995,
+ "grad_norm": 0.5196881890296936,
+ "learning_rate": 0.00017734051474198003,
+ "loss": 1.3032,
+ "step": 655
+ },
+ {
+ "epoch": 0.6561845519052233,
+ "grad_norm": 0.5190469622612,
+ "learning_rate": 0.0001772740226990823,
+ "loss": 1.4049,
+ "step": 656
+ },
+ {
+ "epoch": 0.6571848332343472,
+ "grad_norm": 0.49517175555229187,
+ "learning_rate": 0.00017720744574600863,
+ "loss": 1.3696,
+ "step": 657
+ },
+ {
+ "epoch": 0.658185114563471,
+ "grad_norm": 0.5165138244628906,
+ "learning_rate": 0.00017714078395591502,
+ "loss": 1.3667,
+ "step": 658
+ },
+ {
+ "epoch": 0.6591853958925948,
+ "grad_norm": 0.5624507665634155,
+ "learning_rate": 0.00017707403740205071,
+ "loss": 1.2109,
+ "step": 659
+ },
+ {
+ "epoch": 0.6601856772217186,
+ "grad_norm": 0.45942649245262146,
+ "learning_rate": 0.00017700720615775812,
+ "loss": 1.259,
+ "step": 660
+ },
+ {
+ "epoch": 0.6611859585508424,
+ "grad_norm": 0.5019019842147827,
+ "learning_rate": 0.0001769402902964727,
+ "loss": 1.3739,
+ "step": 661
+ },
+ {
+ "epoch": 0.6621862398799663,
+ "grad_norm": 0.4661652743816376,
+ "learning_rate": 0.00017687328989172288,
+ "loss": 1.2606,
+ "step": 662
+ },
+ {
+ "epoch": 0.66318652120909,
+ "grad_norm": 0.5310545563697815,
+ "learning_rate": 0.00017680620501712996,
+ "loss": 1.3406,
+ "step": 663
+ },
+ {
+ "epoch": 0.6641868025382138,
+ "grad_norm": 0.5190532207489014,
+ "learning_rate": 0.00017673903574640814,
+ "loss": 1.3052,
+ "step": 664
+ },
+ {
+ "epoch": 0.6651870838673377,
+ "grad_norm": 0.5265533328056335,
+ "learning_rate": 0.00017667178215336423,
+ "loss": 1.2326,
+ "step": 665
+ },
+ {
+ "epoch": 0.6661873651964615,
+ "grad_norm": 0.5971291065216064,
+ "learning_rate": 0.0001766044443118978,
+ "loss": 1.4291,
+ "step": 666
+ },
+ {
+ "epoch": 0.6671876465255854,
+ "grad_norm": 0.5295760631561279,
+ "learning_rate": 0.000176537022296001,
+ "loss": 1.2781,
+ "step": 667
+ },
+ {
+ "epoch": 0.6681879278547092,
+ "grad_norm": 0.5124595761299133,
+ "learning_rate": 0.00017646951617975837,
+ "loss": 1.318,
+ "step": 668
+ },
+ {
+ "epoch": 0.6691882091838329,
+ "grad_norm": 0.5968078970909119,
+ "learning_rate": 0.00017640192603734692,
+ "loss": 1.1483,
+ "step": 669
+ },
+ {
+ "epoch": 0.6701884905129568,
+ "grad_norm": 0.6211404204368591,
+ "learning_rate": 0.00017633425194303606,
+ "loss": 1.1164,
+ "step": 670
+ },
+ {
+ "epoch": 0.6711887718420806,
+ "grad_norm": 0.5539883375167847,
+ "learning_rate": 0.00017626649397118734,
+ "loss": 1.453,
+ "step": 671
+ },
+ {
+ "epoch": 0.6721890531712044,
+ "grad_norm": 0.5188294649124146,
+ "learning_rate": 0.00017619865219625452,
+ "loss": 1.5201,
+ "step": 672
+ },
+ {
+ "epoch": 0.6731893345003283,
+ "grad_norm": 0.531973659992218,
+ "learning_rate": 0.00017613072669278343,
+ "loss": 1.3176,
+ "step": 673
+ },
+ {
+ "epoch": 0.674189615829452,
+ "grad_norm": 0.5878707766532898,
+ "learning_rate": 0.00017606271753541192,
+ "loss": 1.5326,
+ "step": 674
+ },
+ {
+ "epoch": 0.6751898971585758,
+ "grad_norm": 0.595443844795227,
+ "learning_rate": 0.00017599462479886974,
+ "loss": 1.4033,
+ "step": 675
+ },
+ {
+ "epoch": 0.6761901784876997,
+ "grad_norm": 0.5093846321105957,
+ "learning_rate": 0.00017592644855797854,
+ "loss": 1.2995,
+ "step": 676
+ },
+ {
+ "epoch": 0.6771904598168235,
+ "grad_norm": 0.5521978735923767,
+ "learning_rate": 0.00017585818888765168,
+ "loss": 1.2912,
+ "step": 677
+ },
+ {
+ "epoch": 0.6781907411459474,
+ "grad_norm": 0.4612530469894409,
+ "learning_rate": 0.0001757898458628941,
+ "loss": 1.1902,
+ "step": 678
+ },
+ {
+ "epoch": 0.6791910224750711,
+ "grad_norm": 0.4973600506782532,
+ "learning_rate": 0.00017572141955880252,
+ "loss": 1.3547,
+ "step": 679
+ },
+ {
+ "epoch": 0.6801913038041949,
+ "grad_norm": 0.606407105922699,
+ "learning_rate": 0.00017565291005056504,
+ "loss": 1.371,
+ "step": 680
+ },
+ {
+ "epoch": 0.6811915851333188,
+ "grad_norm": 0.5027814507484436,
+ "learning_rate": 0.00017558431741346122,
+ "loss": 1.4551,
+ "step": 681
+ },
+ {
+ "epoch": 0.6821918664624426,
+ "grad_norm": 0.5732039213180542,
+ "learning_rate": 0.00017551564172286197,
+ "loss": 1.4181,
+ "step": 682
+ },
+ {
+ "epoch": 0.6831921477915663,
+ "grad_norm": 0.6327995657920837,
+ "learning_rate": 0.00017544688305422943,
+ "loss": 1.237,
+ "step": 683
+ },
+ {
+ "epoch": 0.6841924291206902,
+ "grad_norm": 0.5779625177383423,
+ "learning_rate": 0.00017537804148311695,
+ "loss": 1.5356,
+ "step": 684
+ },
+ {
+ "epoch": 0.685192710449814,
+ "grad_norm": 0.6031951308250427,
+ "learning_rate": 0.00017530911708516902,
+ "loss": 1.3776,
+ "step": 685
+ },
+ {
+ "epoch": 0.6861929917789378,
+ "grad_norm": 0.4811258018016815,
+ "learning_rate": 0.00017524010993612098,
+ "loss": 1.185,
+ "step": 686
+ },
+ {
+ "epoch": 0.6871932731080617,
+ "grad_norm": 0.5048002600669861,
+ "learning_rate": 0.00017517102011179933,
+ "loss": 1.3335,
+ "step": 687
+ },
+ {
+ "epoch": 0.6881935544371854,
+ "grad_norm": 0.5963343977928162,
+ "learning_rate": 0.0001751018476881212,
+ "loss": 1.4326,
+ "step": 688
+ },
+ {
+ "epoch": 0.6891938357663093,
+ "grad_norm": 0.4770168960094452,
+ "learning_rate": 0.00017503259274109464,
+ "loss": 1.4664,
+ "step": 689
+ },
+ {
+ "epoch": 0.6901941170954331,
+ "grad_norm": 0.5020537376403809,
+ "learning_rate": 0.00017496325534681825,
+ "loss": 1.349,
+ "step": 690
+ },
+ {
+ "epoch": 0.6911943984245569,
+ "grad_norm": 0.5567785501480103,
+ "learning_rate": 0.00017489383558148136,
+ "loss": 1.452,
+ "step": 691
+ },
+ {
+ "epoch": 0.6921946797536808,
+ "grad_norm": 0.5167350769042969,
+ "learning_rate": 0.00017482433352136365,
+ "loss": 1.1148,
+ "step": 692
+ },
+ {
+ "epoch": 0.6931949610828045,
+ "grad_norm": 0.6030716300010681,
+ "learning_rate": 0.00017475474924283536,
+ "loss": 1.3473,
+ "step": 693
+ },
+ {
+ "epoch": 0.6941952424119283,
+ "grad_norm": 0.5643062591552734,
+ "learning_rate": 0.00017468508282235704,
+ "loss": 1.3476,
+ "step": 694
+ },
+ {
+ "epoch": 0.6951955237410522,
+ "grad_norm": 0.5124102234840393,
+ "learning_rate": 0.00017461533433647946,
+ "loss": 1.339,
+ "step": 695
+ },
+ {
+ "epoch": 0.696195805070176,
+ "grad_norm": 0.5690215229988098,
+ "learning_rate": 0.00017454550386184362,
+ "loss": 1.3816,
+ "step": 696
+ },
+ {
+ "epoch": 0.6971960863992998,
+ "grad_norm": 0.5938367247581482,
+ "learning_rate": 0.00017447559147518055,
+ "loss": 1.4554,
+ "step": 697
+ },
+ {
+ "epoch": 0.6981963677284236,
+ "grad_norm": 0.5288996696472168,
+ "learning_rate": 0.00017440559725331135,
+ "loss": 1.2904,
+ "step": 698
+ },
+ {
+ "epoch": 0.6991966490575474,
+ "grad_norm": 0.5047140121459961,
+ "learning_rate": 0.000174335521273147,
+ "loss": 1.2362,
+ "step": 699
+ },
+ {
+ "epoch": 0.7001969303866713,
+ "grad_norm": 0.5563321709632874,
+ "learning_rate": 0.00017426536361168834,
+ "loss": 1.2863,
+ "step": 700
+ },
+ {
+ "epoch": 0.7011972117157951,
+ "grad_norm": 0.48857688903808594,
+ "learning_rate": 0.00017419512434602594,
+ "loss": 1.3387,
+ "step": 701
+ },
+ {
+ "epoch": 0.7021974930449189,
+ "grad_norm": 0.5205016732215881,
+ "learning_rate": 0.00017412480355334005,
+ "loss": 1.3874,
+ "step": 702
+ },
+ {
+ "epoch": 0.7031977743740427,
+ "grad_norm": 0.5850381851196289,
+ "learning_rate": 0.00017405440131090048,
+ "loss": 1.5369,
+ "step": 703
+ },
+ {
+ "epoch": 0.7041980557031665,
+ "grad_norm": 0.5708681344985962,
+ "learning_rate": 0.00017398391769606658,
+ "loss": 1.3622,
+ "step": 704
+ },
+ {
+ "epoch": 0.7051983370322903,
+ "grad_norm": 0.5743641257286072,
+ "learning_rate": 0.00017391335278628712,
+ "loss": 1.2946,
+ "step": 705
+ },
+ {
+ "epoch": 0.7061986183614142,
+ "grad_norm": 0.5376024842262268,
+ "learning_rate": 0.00017384270665910014,
+ "loss": 1.2952,
+ "step": 706
+ },
+ {
+ "epoch": 0.707198899690538,
+ "grad_norm": 0.6123641133308411,
+ "learning_rate": 0.000173771979392133,
+ "loss": 1.4239,
+ "step": 707
+ },
+ {
+ "epoch": 0.7081991810196617,
+ "grad_norm": 0.5639240741729736,
+ "learning_rate": 0.00017370117106310214,
+ "loss": 1.3627,
+ "step": 708
+ },
+ {
+ "epoch": 0.7091994623487856,
+ "grad_norm": 0.5551653504371643,
+ "learning_rate": 0.0001736302817498131,
+ "loss": 1.3435,
+ "step": 709
+ },
+ {
+ "epoch": 0.7101997436779094,
+ "grad_norm": 0.4746958911418915,
+ "learning_rate": 0.00017355931153016044,
+ "loss": 1.2402,
+ "step": 710
+ },
+ {
+ "epoch": 0.7112000250070333,
+ "grad_norm": 0.4722553491592407,
+ "learning_rate": 0.0001734882604821276,
+ "loss": 1.3962,
+ "step": 711
+ },
+ {
+ "epoch": 0.712200306336157,
+ "grad_norm": 0.5038101077079773,
+ "learning_rate": 0.0001734171286837868,
+ "loss": 1.3261,
+ "step": 712
+ },
+ {
+ "epoch": 0.7132005876652808,
+ "grad_norm": 0.5004639625549316,
+ "learning_rate": 0.00017334591621329906,
+ "loss": 1.4943,
+ "step": 713
+ },
+ {
+ "epoch": 0.7142008689944047,
+ "grad_norm": 0.5141516327857971,
+ "learning_rate": 0.00017327462314891402,
+ "loss": 1.2754,
+ "step": 714
+ },
+ {
+ "epoch": 0.7152011503235285,
+ "grad_norm": 0.5491873025894165,
+ "learning_rate": 0.00017320324956896977,
+ "loss": 1.3052,
+ "step": 715
+ },
+ {
+ "epoch": 0.7162014316526523,
+ "grad_norm": 0.49937358498573303,
+ "learning_rate": 0.00017313179555189306,
+ "loss": 1.2277,
+ "step": 716
+ },
+ {
+ "epoch": 0.7172017129817762,
+ "grad_norm": 0.6419594287872314,
+ "learning_rate": 0.00017306026117619889,
+ "loss": 1.4844,
+ "step": 717
+ },
+ {
+ "epoch": 0.7182019943108999,
+ "grad_norm": 0.521108090877533,
+ "learning_rate": 0.0001729886465204906,
+ "loss": 1.2917,
+ "step": 718
+ },
+ {
+ "epoch": 0.7192022756400237,
+ "grad_norm": 0.532421886920929,
+ "learning_rate": 0.0001729169516634598,
+ "loss": 1.4555,
+ "step": 719
+ },
+ {
+ "epoch": 0.7202025569691476,
+ "grad_norm": 0.5168073177337646,
+ "learning_rate": 0.0001728451766838861,
+ "loss": 1.2116,
+ "step": 720
+ },
+ {
+ "epoch": 0.7212028382982714,
+ "grad_norm": 0.5593972206115723,
+ "learning_rate": 0.00017277332166063726,
+ "loss": 1.4345,
+ "step": 721
+ },
+ {
+ "epoch": 0.7222031196273953,
+ "grad_norm": 0.5317432284355164,
+ "learning_rate": 0.00017270138667266894,
+ "loss": 1.2987,
+ "step": 722
+ },
+ {
+ "epoch": 0.723203400956519,
+ "grad_norm": 0.6262248158454895,
+ "learning_rate": 0.00017262937179902472,
+ "loss": 1.2591,
+ "step": 723
+ },
+ {
+ "epoch": 0.7242036822856428,
+ "grad_norm": 0.5377100110054016,
+ "learning_rate": 0.00017255727711883588,
+ "loss": 1.366,
+ "step": 724
+ },
+ {
+ "epoch": 0.7252039636147667,
+ "grad_norm": 0.5637168288230896,
+ "learning_rate": 0.00017248510271132144,
+ "loss": 1.4593,
+ "step": 725
+ },
+ {
+ "epoch": 0.7262042449438905,
+ "grad_norm": 0.5360320210456848,
+ "learning_rate": 0.00017241284865578802,
+ "loss": 1.4797,
+ "step": 726
+ },
+ {
+ "epoch": 0.7272045262730142,
+ "grad_norm": 0.48500168323516846,
+ "learning_rate": 0.00017234051503162978,
+ "loss": 1.3875,
+ "step": 727
+ },
+ {
+ "epoch": 0.7282048076021381,
+ "grad_norm": 0.5666176080703735,
+ "learning_rate": 0.0001722681019183283,
+ "loss": 1.4683,
+ "step": 728
+ },
+ {
+ "epoch": 0.7292050889312619,
+ "grad_norm": 0.5710940361022949,
+ "learning_rate": 0.00017219560939545246,
+ "loss": 1.5538,
+ "step": 729
+ },
+ {
+ "epoch": 0.7302053702603858,
+ "grad_norm": 0.5658044219017029,
+ "learning_rate": 0.00017212303754265843,
+ "loss": 1.248,
+ "step": 730
+ },
+ {
+ "epoch": 0.7312056515895096,
+ "grad_norm": 0.5355331301689148,
+ "learning_rate": 0.0001720503864396896,
+ "loss": 1.259,
+ "step": 731
+ },
+ {
+ "epoch": 0.7322059329186333,
+ "grad_norm": 0.5683363676071167,
+ "learning_rate": 0.00017197765616637636,
+ "loss": 1.4242,
+ "step": 732
+ },
+ {
+ "epoch": 0.7332062142477572,
+ "grad_norm": 0.488972932100296,
+ "learning_rate": 0.0001719048468026361,
+ "loss": 1.3442,
+ "step": 733
+ },
+ {
+ "epoch": 0.734206495576881,
+ "grad_norm": 0.45563748478889465,
+ "learning_rate": 0.00017183195842847322,
+ "loss": 1.3236,
+ "step": 734
+ },
+ {
+ "epoch": 0.7352067769060048,
+ "grad_norm": 0.5114185214042664,
+ "learning_rate": 0.0001717589911239788,
+ "loss": 1.3071,
+ "step": 735
+ },
+ {
+ "epoch": 0.7362070582351287,
+ "grad_norm": 0.558686375617981,
+ "learning_rate": 0.00017168594496933074,
+ "loss": 1.2889,
+ "step": 736
+ },
+ {
+ "epoch": 0.7372073395642524,
+ "grad_norm": 0.49099281430244446,
+ "learning_rate": 0.00017161282004479351,
+ "loss": 1.1701,
+ "step": 737
+ },
+ {
+ "epoch": 0.7382076208933762,
+ "grad_norm": 0.549524188041687,
+ "learning_rate": 0.0001715396164307182,
+ "loss": 1.2853,
+ "step": 738
+ },
+ {
+ "epoch": 0.7392079022225001,
+ "grad_norm": 0.5683863162994385,
+ "learning_rate": 0.0001714663342075424,
+ "loss": 1.4201,
+ "step": 739
+ },
+ {
+ "epoch": 0.7402081835516239,
+ "grad_norm": 0.5957104563713074,
+ "learning_rate": 0.00017139297345578994,
+ "loss": 1.3406,
+ "step": 740
+ },
+ {
+ "epoch": 0.7412084648807478,
+ "grad_norm": 0.4645147919654846,
+ "learning_rate": 0.00017131953425607104,
+ "loss": 1.2344,
+ "step": 741
+ },
+ {
+ "epoch": 0.7422087462098715,
+ "grad_norm": 0.4981783330440521,
+ "learning_rate": 0.00017124601668908212,
+ "loss": 1.422,
+ "step": 742
+ },
+ {
+ "epoch": 0.7432090275389953,
+ "grad_norm": 0.5426530838012695,
+ "learning_rate": 0.00017117242083560568,
+ "loss": 1.4275,
+ "step": 743
+ },
+ {
+ "epoch": 0.7442093088681192,
+ "grad_norm": 0.5585354566574097,
+ "learning_rate": 0.00017109874677651024,
+ "loss": 1.5049,
+ "step": 744
+ },
+ {
+ "epoch": 0.745209590197243,
+ "grad_norm": 0.5639151930809021,
+ "learning_rate": 0.0001710249945927503,
+ "loss": 1.4019,
+ "step": 745
+ },
+ {
+ "epoch": 0.7462098715263668,
+ "grad_norm": 0.8334717750549316,
+ "learning_rate": 0.00017095116436536612,
+ "loss": 1.5607,
+ "step": 746
+ },
+ {
+ "epoch": 0.7472101528554906,
+ "grad_norm": 0.513970673084259,
+ "learning_rate": 0.00017087725617548385,
+ "loss": 1.1967,
+ "step": 747
+ },
+ {
+ "epoch": 0.7482104341846144,
+ "grad_norm": 0.6200702786445618,
+ "learning_rate": 0.00017080327010431513,
+ "loss": 1.2298,
+ "step": 748
+ },
+ {
+ "epoch": 0.7492107155137382,
+ "grad_norm": 0.54522305727005,
+ "learning_rate": 0.00017072920623315734,
+ "loss": 1.3214,
+ "step": 749
+ },
+ {
+ "epoch": 0.7502109968428621,
+ "grad_norm": 0.6682360172271729,
+ "learning_rate": 0.00017065506464339326,
+ "loss": 1.4631,
+ "step": 750
+ },
+ {
+ "epoch": 0.7512112781719859,
+ "grad_norm": 0.5061535239219666,
+ "learning_rate": 0.00017058084541649106,
+ "loss": 1.5062,
+ "step": 751
+ },
+ {
+ "epoch": 0.7522115595011097,
+ "grad_norm": 0.5790627598762512,
+ "learning_rate": 0.00017050654863400429,
+ "loss": 1.1371,
+ "step": 752
+ },
+ {
+ "epoch": 0.7532118408302335,
+ "grad_norm": 0.6058077216148376,
+ "learning_rate": 0.00017043217437757164,
+ "loss": 1.2185,
+ "step": 753
+ },
+ {
+ "epoch": 0.7542121221593573,
+ "grad_norm": 0.5494515895843506,
+ "learning_rate": 0.00017035772272891702,
+ "loss": 1.2468,
+ "step": 754
+ },
+ {
+ "epoch": 0.7552124034884812,
+ "grad_norm": 0.5687912106513977,
+ "learning_rate": 0.00017028319376984928,
+ "loss": 1.5621,
+ "step": 755
+ },
+ {
+ "epoch": 0.756212684817605,
+ "grad_norm": 0.5341185927391052,
+ "learning_rate": 0.00017020858758226229,
+ "loss": 1.3598,
+ "step": 756
+ },
+ {
+ "epoch": 0.7572129661467287,
+ "grad_norm": 0.5373026132583618,
+ "learning_rate": 0.0001701339042481347,
+ "loss": 1.4185,
+ "step": 757
+ },
+ {
+ "epoch": 0.7582132474758526,
+ "grad_norm": 0.46508973836898804,
+ "learning_rate": 0.00017005914384953007,
+ "loss": 1.2962,
+ "step": 758
+ },
+ {
+ "epoch": 0.7592135288049764,
+ "grad_norm": 0.4580937325954437,
+ "learning_rate": 0.00016998430646859654,
+ "loss": 1.0707,
+ "step": 759
+ },
+ {
+ "epoch": 0.7602138101341002,
+ "grad_norm": 0.5277093052864075,
+ "learning_rate": 0.00016990939218756683,
+ "loss": 1.2529,
+ "step": 760
+ },
+ {
+ "epoch": 0.761214091463224,
+ "grad_norm": 0.5356671214103699,
+ "learning_rate": 0.0001698344010887582,
+ "loss": 1.4032,
+ "step": 761
+ },
+ {
+ "epoch": 0.7622143727923478,
+ "grad_norm": 0.6881769299507141,
+ "learning_rate": 0.0001697593332545723,
+ "loss": 1.4885,
+ "step": 762
+ },
+ {
+ "epoch": 0.7632146541214717,
+ "grad_norm": 0.5370383262634277,
+ "learning_rate": 0.0001696841887674951,
+ "loss": 1.3271,
+ "step": 763
+ },
+ {
+ "epoch": 0.7642149354505955,
+ "grad_norm": 0.4792316257953644,
+ "learning_rate": 0.00016960896771009684,
+ "loss": 1.2274,
+ "step": 764
+ },
+ {
+ "epoch": 0.7652152167797193,
+ "grad_norm": 0.5276592373847961,
+ "learning_rate": 0.00016953367016503182,
+ "loss": 1.2399,
+ "step": 765
+ },
+ {
+ "epoch": 0.7662154981088432,
+ "grad_norm": 0.4789050221443176,
+ "learning_rate": 0.00016945829621503838,
+ "loss": 1.4002,
+ "step": 766
+ },
+ {
+ "epoch": 0.7672157794379669,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 0.00016938284594293897,
+ "loss": 1.3897,
+ "step": 767
+ },
+ {
+ "epoch": 0.7682160607670907,
+ "grad_norm": 0.5009675621986389,
+ "learning_rate": 0.00016930731943163972,
+ "loss": 1.3797,
+ "step": 768
+ },
+ {
+ "epoch": 0.7692163420962146,
+ "grad_norm": 0.4863432049751282,
+ "learning_rate": 0.00016923171676413063,
+ "loss": 1.4251,
+ "step": 769
+ },
+ {
+ "epoch": 0.7702166234253384,
+ "grad_norm": 0.5190616846084595,
+ "learning_rate": 0.00016915603802348535,
+ "loss": 1.4265,
+ "step": 770
+ },
+ {
+ "epoch": 0.7712169047544621,
+ "grad_norm": 0.5603469610214233,
+ "learning_rate": 0.00016908028329286112,
+ "loss": 1.2852,
+ "step": 771
+ },
+ {
+ "epoch": 0.772217186083586,
+ "grad_norm": 0.5128753185272217,
+ "learning_rate": 0.0001690044526554987,
+ "loss": 1.3324,
+ "step": 772
+ },
+ {
+ "epoch": 0.7732174674127098,
+ "grad_norm": 0.4992072284221649,
+ "learning_rate": 0.00016892854619472223,
+ "loss": 1.2498,
+ "step": 773
+ },
+ {
+ "epoch": 0.7742177487418337,
+ "grad_norm": 0.6128407716751099,
+ "learning_rate": 0.00016885256399393924,
+ "loss": 1.2967,
+ "step": 774
+ },
+ {
+ "epoch": 0.7752180300709575,
+ "grad_norm": 0.5186858177185059,
+ "learning_rate": 0.00016877650613664034,
+ "loss": 1.2654,
+ "step": 775
+ },
+ {
+ "epoch": 0.7762183114000812,
+ "grad_norm": 0.5207421183586121,
+ "learning_rate": 0.00016870037270639942,
+ "loss": 1.2994,
+ "step": 776
+ },
+ {
+ "epoch": 0.7772185927292051,
+ "grad_norm": 0.509912371635437,
+ "learning_rate": 0.0001686241637868734,
+ "loss": 1.3971,
+ "step": 777
+ },
+ {
+ "epoch": 0.7782188740583289,
+ "grad_norm": 0.47703370451927185,
+ "learning_rate": 0.00016854787946180198,
+ "loss": 1.282,
+ "step": 778
+ },
+ {
+ "epoch": 0.7792191553874527,
+ "grad_norm": 0.5404442548751831,
+ "learning_rate": 0.00016847151981500789,
+ "loss": 1.1986,
+ "step": 779
+ },
+ {
+ "epoch": 0.7802194367165766,
+ "grad_norm": 0.541050136089325,
+ "learning_rate": 0.00016839508493039657,
+ "loss": 1.4478,
+ "step": 780
+ },
+ {
+ "epoch": 0.7812197180457003,
+ "grad_norm": 0.46520569920539856,
+ "learning_rate": 0.00016831857489195618,
+ "loss": 1.2385,
+ "step": 781
+ },
+ {
+ "epoch": 0.7822199993748241,
+ "grad_norm": 0.5150445699691772,
+ "learning_rate": 0.00016824198978375736,
+ "loss": 1.3695,
+ "step": 782
+ },
+ {
+ "epoch": 0.783220280703948,
+ "grad_norm": 0.5754334926605225,
+ "learning_rate": 0.00016816532968995328,
+ "loss": 1.3026,
+ "step": 783
+ },
+ {
+ "epoch": 0.7842205620330718,
+ "grad_norm": 0.5335776209831238,
+ "learning_rate": 0.0001680885946947796,
+ "loss": 1.3391,
+ "step": 784
+ },
+ {
+ "epoch": 0.7852208433621957,
+ "grad_norm": 0.6596659421920776,
+ "learning_rate": 0.00016801178488255413,
+ "loss": 1.3224,
+ "step": 785
+ },
+ {
+ "epoch": 0.7862211246913194,
+ "grad_norm": 0.5251991748809814,
+ "learning_rate": 0.00016793490033767698,
+ "loss": 1.1744,
+ "step": 786
+ },
+ {
+ "epoch": 0.7872214060204432,
+ "grad_norm": 0.5112204551696777,
+ "learning_rate": 0.00016785794114463037,
+ "loss": 1.2455,
+ "step": 787
+ },
+ {
+ "epoch": 0.7882216873495671,
+ "grad_norm": 0.532893717288971,
+ "learning_rate": 0.00016778090738797853,
+ "loss": 1.2437,
+ "step": 788
+ },
+ {
+ "epoch": 0.7892219686786909,
+ "grad_norm": 0.5534240007400513,
+ "learning_rate": 0.00016770379915236766,
+ "loss": 1.396,
+ "step": 789
+ },
+ {
+ "epoch": 0.7902222500078147,
+ "grad_norm": 0.5164292454719543,
+ "learning_rate": 0.00016762661652252567,
+ "loss": 1.3138,
+ "step": 790
+ },
+ {
+ "epoch": 0.7912225313369385,
+ "grad_norm": 0.5660764575004578,
+ "learning_rate": 0.00016754935958326244,
+ "loss": 1.3014,
+ "step": 791
+ },
+ {
+ "epoch": 0.7922228126660623,
+ "grad_norm": 0.5137651562690735,
+ "learning_rate": 0.00016747202841946928,
+ "loss": 1.2834,
+ "step": 792
+ },
+ {
+ "epoch": 0.7932230939951862,
+ "grad_norm": 0.5546874403953552,
+ "learning_rate": 0.00016739462311611919,
+ "loss": 1.2841,
+ "step": 793
+ },
+ {
+ "epoch": 0.79422337532431,
+ "grad_norm": 0.5112007260322571,
+ "learning_rate": 0.00016731714375826657,
+ "loss": 1.1873,
+ "step": 794
+ },
+ {
+ "epoch": 0.7952236566534338,
+ "grad_norm": 0.5462679862976074,
+ "learning_rate": 0.00016723959043104728,
+ "loss": 1.2602,
+ "step": 795
+ },
+ {
+ "epoch": 0.7962239379825576,
+ "grad_norm": 0.5083702802658081,
+ "learning_rate": 0.00016716196321967832,
+ "loss": 1.334,
+ "step": 796
+ },
+ {
+ "epoch": 0.7972242193116814,
+ "grad_norm": 0.5491913557052612,
+ "learning_rate": 0.00016708426220945802,
+ "loss": 1.335,
+ "step": 797
+ },
+ {
+ "epoch": 0.7982245006408052,
+ "grad_norm": 0.5257419943809509,
+ "learning_rate": 0.00016700648748576574,
+ "loss": 1.374,
+ "step": 798
+ },
+ {
+ "epoch": 0.7992247819699291,
+ "grad_norm": 0.5252013206481934,
+ "learning_rate": 0.0001669286391340618,
+ "loss": 1.281,
+ "step": 799
+ },
+ {
+ "epoch": 0.8002250632990529,
+ "grad_norm": 0.5784058570861816,
+ "learning_rate": 0.00016685071723988748,
+ "loss": 1.385,
+ "step": 800
+ },
+ {
+ "epoch": 0.8012253446281766,
+ "grad_norm": 0.5508819818496704,
+ "learning_rate": 0.00016677272188886483,
+ "loss": 1.5138,
+ "step": 801
+ },
+ {
+ "epoch": 0.8022256259573005,
+ "grad_norm": 0.5943104028701782,
+ "learning_rate": 0.00016669465316669667,
+ "loss": 1.2341,
+ "step": 802
+ },
+ {
+ "epoch": 0.8032259072864243,
+ "grad_norm": 0.5109750032424927,
+ "learning_rate": 0.00016661651115916642,
+ "loss": 1.361,
+ "step": 803
+ },
+ {
+ "epoch": 0.8042261886155482,
+ "grad_norm": 0.5322972536087036,
+ "learning_rate": 0.00016653829595213794,
+ "loss": 1.3383,
+ "step": 804
+ },
+ {
+ "epoch": 0.805226469944672,
+ "grad_norm": 0.4870489537715912,
+ "learning_rate": 0.00016646000763155568,
+ "loss": 1.2932,
+ "step": 805
+ },
+ {
+ "epoch": 0.8062267512737957,
+ "grad_norm": 0.6070749163627625,
+ "learning_rate": 0.00016638164628344425,
+ "loss": 1.3517,
+ "step": 806
+ },
+ {
+ "epoch": 0.8072270326029196,
+ "grad_norm": 0.5695485472679138,
+ "learning_rate": 0.00016630321199390867,
+ "loss": 1.295,
+ "step": 807
+ },
+ {
+ "epoch": 0.8082273139320434,
+ "grad_norm": 0.49092933535575867,
+ "learning_rate": 0.00016622470484913406,
+ "loss": 1.1708,
+ "step": 808
+ },
+ {
+ "epoch": 0.8092275952611672,
+ "grad_norm": 0.5488709807395935,
+ "learning_rate": 0.00016614612493538551,
+ "loss": 1.3101,
+ "step": 809
+ },
+ {
+ "epoch": 0.810227876590291,
+ "grad_norm": 0.6875150799751282,
+ "learning_rate": 0.00016606747233900815,
+ "loss": 1.3,
+ "step": 810
+ },
+ {
+ "epoch": 0.8112281579194148,
+ "grad_norm": 0.5599775910377502,
+ "learning_rate": 0.00016598874714642697,
+ "loss": 1.5711,
+ "step": 811
+ },
+ {
+ "epoch": 0.8122284392485386,
+ "grad_norm": 0.7102994322776794,
+ "learning_rate": 0.00016590994944414678,
+ "loss": 1.4553,
+ "step": 812
+ },
+ {
+ "epoch": 0.8132287205776625,
+ "grad_norm": 0.5191233158111572,
+ "learning_rate": 0.00016583107931875192,
+ "loss": 1.4292,
+ "step": 813
+ },
+ {
+ "epoch": 0.8142290019067863,
+ "grad_norm": 0.4739600718021393,
+ "learning_rate": 0.0001657521368569064,
+ "loss": 1.3776,
+ "step": 814
+ },
+ {
+ "epoch": 0.8152292832359102,
+ "grad_norm": 0.5282078981399536,
+ "learning_rate": 0.0001656731221453537,
+ "loss": 1.4359,
+ "step": 815
+ },
+ {
+ "epoch": 0.8162295645650339,
+ "grad_norm": 0.690367579460144,
+ "learning_rate": 0.00016559403527091675,
+ "loss": 1.1747,
+ "step": 816
+ },
+ {
+ "epoch": 0.8172298458941577,
+ "grad_norm": 0.5715120434761047,
+ "learning_rate": 0.0001655148763204977,
+ "loss": 1.3289,
+ "step": 817
+ },
+ {
+ "epoch": 0.8182301272232816,
+ "grad_norm": 0.7024423480033875,
+ "learning_rate": 0.00016543564538107797,
+ "loss": 1.4758,
+ "step": 818
+ },
+ {
+ "epoch": 0.8192304085524054,
+ "grad_norm": 0.5568886399269104,
+ "learning_rate": 0.00016535634253971794,
+ "loss": 1.5172,
+ "step": 819
+ },
+ {
+ "epoch": 0.8202306898815291,
+ "grad_norm": 0.5847441554069519,
+ "learning_rate": 0.00016527696788355714,
+ "loss": 1.1993,
+ "step": 820
+ },
+ {
+ "epoch": 0.821230971210653,
+ "grad_norm": 0.5402149558067322,
+ "learning_rate": 0.00016519752149981397,
+ "loss": 1.2921,
+ "step": 821
+ },
+ {
+ "epoch": 0.8222312525397768,
+ "grad_norm": 0.6050311326980591,
+ "learning_rate": 0.0001651180034757856,
+ "loss": 1.59,
+ "step": 822
+ },
+ {
+ "epoch": 0.8232315338689006,
+ "grad_norm": 0.6215486526489258,
+ "learning_rate": 0.00016503841389884798,
+ "loss": 1.4562,
+ "step": 823
+ },
+ {
+ "epoch": 0.8242318151980245,
+ "grad_norm": 0.6507789492607117,
+ "learning_rate": 0.00016495875285645566,
+ "loss": 1.349,
+ "step": 824
+ },
+ {
+ "epoch": 0.8252320965271482,
+ "grad_norm": 0.5273147225379944,
+ "learning_rate": 0.00016487902043614173,
+ "loss": 1.4016,
+ "step": 825
+ },
+ {
+ "epoch": 0.8262323778562721,
+ "grad_norm": 0.579987645149231,
+ "learning_rate": 0.0001647992167255177,
+ "loss": 1.4077,
+ "step": 826
+ },
+ {
+ "epoch": 0.8272326591853959,
+ "grad_norm": 0.5068405270576477,
+ "learning_rate": 0.0001647193418122734,
+ "loss": 1.5075,
+ "step": 827
+ },
+ {
+ "epoch": 0.8282329405145197,
+ "grad_norm": 0.519982099533081,
+ "learning_rate": 0.00016463939578417692,
+ "loss": 1.2721,
+ "step": 828
+ },
+ {
+ "epoch": 0.8292332218436436,
+ "grad_norm": 0.5181561708450317,
+ "learning_rate": 0.0001645593787290745,
+ "loss": 1.2299,
+ "step": 829
+ },
+ {
+ "epoch": 0.8302335031727673,
+ "grad_norm": 0.47413337230682373,
+ "learning_rate": 0.0001644792907348904,
+ "loss": 1.2462,
+ "step": 830
+ },
+ {
+ "epoch": 0.8312337845018911,
+ "grad_norm": 0.5426570773124695,
+ "learning_rate": 0.00016439913188962685,
+ "loss": 1.4496,
+ "step": 831
+ },
+ {
+ "epoch": 0.832234065831015,
+ "grad_norm": 0.5744379758834839,
+ "learning_rate": 0.0001643189022813639,
+ "loss": 1.3284,
+ "step": 832
+ },
+ {
+ "epoch": 0.8332343471601388,
+ "grad_norm": 0.49693235754966736,
+ "learning_rate": 0.0001642386019982594,
+ "loss": 1.4082,
+ "step": 833
+ },
+ {
+ "epoch": 0.8342346284892626,
+ "grad_norm": 0.5346773862838745,
+ "learning_rate": 0.00016415823112854883,
+ "loss": 1.4238,
+ "step": 834
+ },
+ {
+ "epoch": 0.8352349098183864,
+ "grad_norm": 0.6201802492141724,
+ "learning_rate": 0.00016407778976054526,
+ "loss": 1.3288,
+ "step": 835
+ },
+ {
+ "epoch": 0.8362351911475102,
+ "grad_norm": 0.5161807537078857,
+ "learning_rate": 0.0001639972779826392,
+ "loss": 1.3798,
+ "step": 836
+ },
+ {
+ "epoch": 0.8372354724766341,
+ "grad_norm": 0.4670160412788391,
+ "learning_rate": 0.0001639166958832985,
+ "loss": 1.3765,
+ "step": 837
+ },
+ {
+ "epoch": 0.8382357538057579,
+ "grad_norm": 0.6492543816566467,
+ "learning_rate": 0.00016383604355106837,
+ "loss": 1.5277,
+ "step": 838
+ },
+ {
+ "epoch": 0.8392360351348817,
+ "grad_norm": 0.5766328573226929,
+ "learning_rate": 0.00016375532107457108,
+ "loss": 1.2481,
+ "step": 839
+ },
+ {
+ "epoch": 0.8402363164640055,
+ "grad_norm": 0.6431072950363159,
+ "learning_rate": 0.00016367452854250603,
+ "loss": 1.2755,
+ "step": 840
+ },
+ {
+ "epoch": 0.8412365977931293,
+ "grad_norm": 0.5121828317642212,
+ "learning_rate": 0.00016359366604364972,
+ "loss": 1.2927,
+ "step": 841
+ },
+ {
+ "epoch": 0.8422368791222531,
+ "grad_norm": 0.5222392678260803,
+ "learning_rate": 0.00016351273366685526,
+ "loss": 1.2626,
+ "step": 842
+ },
+ {
+ "epoch": 0.843237160451377,
+ "grad_norm": 0.5536903142929077,
+ "learning_rate": 0.00016343173150105278,
+ "loss": 1.1892,
+ "step": 843
+ },
+ {
+ "epoch": 0.8442374417805008,
+ "grad_norm": 0.5569381713867188,
+ "learning_rate": 0.00016335065963524897,
+ "loss": 1.4263,
+ "step": 844
+ },
+ {
+ "epoch": 0.8452377231096245,
+ "grad_norm": 0.6490715742111206,
+ "learning_rate": 0.0001632695181585272,
+ "loss": 1.452,
+ "step": 845
+ },
+ {
+ "epoch": 0.8462380044387484,
+ "grad_norm": 0.5965350270271301,
+ "learning_rate": 0.00016318830716004722,
+ "loss": 1.4189,
+ "step": 846
+ },
+ {
+ "epoch": 0.8472382857678722,
+ "grad_norm": 0.45904603600502014,
+ "learning_rate": 0.00016310702672904528,
+ "loss": 1.4024,
+ "step": 847
+ },
+ {
+ "epoch": 0.8482385670969961,
+ "grad_norm": 0.4320334494113922,
+ "learning_rate": 0.00016302567695483382,
+ "loss": 1.2105,
+ "step": 848
+ },
+ {
+ "epoch": 0.8492388484261199,
+ "grad_norm": 0.527032196521759,
+ "learning_rate": 0.0001629442579268016,
+ "loss": 1.1996,
+ "step": 849
+ },
+ {
+ "epoch": 0.8502391297552436,
+ "grad_norm": 0.6317036747932434,
+ "learning_rate": 0.00016286276973441333,
+ "loss": 1.4811,
+ "step": 850
+ },
+ {
+ "epoch": 0.8512394110843675,
+ "grad_norm": 0.5726277828216553,
+ "learning_rate": 0.00016278121246720987,
+ "loss": 1.3249,
+ "step": 851
+ },
+ {
+ "epoch": 0.8522396924134913,
+ "grad_norm": 0.4624577462673187,
+ "learning_rate": 0.00016269958621480788,
+ "loss": 1.3291,
+ "step": 852
+ },
+ {
+ "epoch": 0.8532399737426151,
+ "grad_norm": 0.5774461627006531,
+ "learning_rate": 0.0001626178910668998,
+ "loss": 1.2891,
+ "step": 853
+ },
+ {
+ "epoch": 0.854240255071739,
+ "grad_norm": 0.503584086894989,
+ "learning_rate": 0.00016253612711325386,
+ "loss": 1.3048,
+ "step": 854
+ },
+ {
+ "epoch": 0.8552405364008627,
+ "grad_norm": 0.4560583233833313,
+ "learning_rate": 0.0001624542944437139,
+ "loss": 1.2658,
+ "step": 855
+ },
+ {
+ "epoch": 0.8562408177299866,
+ "grad_norm": 0.49611610174179077,
+ "learning_rate": 0.00016237239314819917,
+ "loss": 1.1017,
+ "step": 856
+ },
+ {
+ "epoch": 0.8572410990591104,
+ "grad_norm": 0.5600405931472778,
+ "learning_rate": 0.0001622904233167044,
+ "loss": 1.3274,
+ "step": 857
+ },
+ {
+ "epoch": 0.8582413803882342,
+ "grad_norm": 0.5849353075027466,
+ "learning_rate": 0.0001622083850392996,
+ "loss": 1.274,
+ "step": 858
+ },
+ {
+ "epoch": 0.859241661717358,
+ "grad_norm": 0.5781377553939819,
+ "learning_rate": 0.00016212627840613003,
+ "loss": 1.4157,
+ "step": 859
+ },
+ {
+ "epoch": 0.8602419430464818,
+ "grad_norm": 0.4908173680305481,
+ "learning_rate": 0.000162044103507416,
+ "loss": 1.3,
+ "step": 860
+ },
+ {
+ "epoch": 0.8612422243756056,
+ "grad_norm": 0.5844553112983704,
+ "learning_rate": 0.00016196186043345288,
+ "loss": 1.2325,
+ "step": 861
+ },
+ {
+ "epoch": 0.8622425057047295,
+ "grad_norm": 0.5381117463111877,
+ "learning_rate": 0.00016187954927461093,
+ "loss": 1.41,
+ "step": 862
+ },
+ {
+ "epoch": 0.8632427870338533,
+ "grad_norm": 0.5468165278434753,
+ "learning_rate": 0.00016179717012133521,
+ "loss": 1.4272,
+ "step": 863
+ },
+ {
+ "epoch": 0.864243068362977,
+ "grad_norm": 0.5702970027923584,
+ "learning_rate": 0.00016171472306414554,
+ "loss": 1.3624,
+ "step": 864
+ },
+ {
+ "epoch": 0.8652433496921009,
+ "grad_norm": 0.5430637001991272,
+ "learning_rate": 0.00016163220819363628,
+ "loss": 1.2555,
+ "step": 865
+ },
+ {
+ "epoch": 0.8662436310212247,
+ "grad_norm": 0.5266844034194946,
+ "learning_rate": 0.00016154962560047643,
+ "loss": 1.3743,
+ "step": 866
+ },
+ {
+ "epoch": 0.8672439123503486,
+ "grad_norm": 0.5201333165168762,
+ "learning_rate": 0.00016146697537540924,
+ "loss": 1.3959,
+ "step": 867
+ },
+ {
+ "epoch": 0.8682441936794724,
+ "grad_norm": 0.44362199306488037,
+ "learning_rate": 0.0001613842576092524,
+ "loss": 1.2661,
+ "step": 868
+ },
+ {
+ "epoch": 0.8692444750085961,
+ "grad_norm": 0.5465226769447327,
+ "learning_rate": 0.00016130147239289778,
+ "loss": 1.3688,
+ "step": 869
+ },
+ {
+ "epoch": 0.87024475633772,
+ "grad_norm": 0.5353460907936096,
+ "learning_rate": 0.00016121861981731135,
+ "loss": 1.2327,
+ "step": 870
+ },
+ {
+ "epoch": 0.8712450376668438,
+ "grad_norm": 0.5463739633560181,
+ "learning_rate": 0.00016113569997353312,
+ "loss": 1.2994,
+ "step": 871
+ },
+ {
+ "epoch": 0.8722453189959676,
+ "grad_norm": 0.5219647288322449,
+ "learning_rate": 0.000161052712952677,
+ "loss": 1.3916,
+ "step": 872
+ },
+ {
+ "epoch": 0.8732456003250915,
+ "grad_norm": 0.4675636887550354,
+ "learning_rate": 0.0001609696588459307,
+ "loss": 1.2786,
+ "step": 873
+ },
+ {
+ "epoch": 0.8742458816542152,
+ "grad_norm": 0.48863986134529114,
+ "learning_rate": 0.00016088653774455568,
+ "loss": 1.1762,
+ "step": 874
+ },
+ {
+ "epoch": 0.875246162983339,
+ "grad_norm": 0.48759785294532776,
+ "learning_rate": 0.00016080334973988695,
+ "loss": 1.2107,
+ "step": 875
+ },
+ {
+ "epoch": 0.8762464443124629,
+ "grad_norm": 0.7353807687759399,
+ "learning_rate": 0.00016072009492333318,
+ "loss": 1.4855,
+ "step": 876
+ },
+ {
+ "epoch": 0.8772467256415867,
+ "grad_norm": 0.4878953993320465,
+ "learning_rate": 0.0001606367733863763,
+ "loss": 1.2343,
+ "step": 877
+ },
+ {
+ "epoch": 0.8782470069707106,
+ "grad_norm": 0.4764840304851532,
+ "learning_rate": 0.00016055338522057158,
+ "loss": 1.3159,
+ "step": 878
+ },
+ {
+ "epoch": 0.8792472882998343,
+ "grad_norm": 0.5289160013198853,
+ "learning_rate": 0.00016046993051754756,
+ "loss": 1.3298,
+ "step": 879
+ },
+ {
+ "epoch": 0.8802475696289581,
+ "grad_norm": 0.5421459078788757,
+ "learning_rate": 0.00016038640936900586,
+ "loss": 1.4081,
+ "step": 880
+ },
+ {
+ "epoch": 0.881247850958082,
+ "grad_norm": 0.5096681118011475,
+ "learning_rate": 0.00016030282186672116,
+ "loss": 1.2406,
+ "step": 881
+ },
+ {
+ "epoch": 0.8822481322872058,
+ "grad_norm": 0.5783627033233643,
+ "learning_rate": 0.00016021916810254097,
+ "loss": 1.3505,
+ "step": 882
+ },
+ {
+ "epoch": 0.8832484136163296,
+ "grad_norm": 0.5718142986297607,
+ "learning_rate": 0.00016013544816838565,
+ "loss": 1.4106,
+ "step": 883
+ },
+ {
+ "epoch": 0.8842486949454534,
+ "grad_norm": 0.551607072353363,
+ "learning_rate": 0.00016005166215624827,
+ "loss": 1.3474,
+ "step": 884
+ },
+ {
+ "epoch": 0.8852489762745772,
+ "grad_norm": 0.5464247465133667,
+ "learning_rate": 0.0001599678101581945,
+ "loss": 1.4443,
+ "step": 885
+ },
+ {
+ "epoch": 0.886249257603701,
+ "grad_norm": 0.5075456500053406,
+ "learning_rate": 0.00015988389226636253,
+ "loss": 1.4919,
+ "step": 886
+ },
+ {
+ "epoch": 0.8872495389328249,
+ "grad_norm": 0.48557186126708984,
+ "learning_rate": 0.00015979990857296295,
+ "loss": 1.4225,
+ "step": 887
+ },
+ {
+ "epoch": 0.8882498202619487,
+ "grad_norm": 0.5385611653327942,
+ "learning_rate": 0.00015971585917027862,
+ "loss": 1.2937,
+ "step": 888
+ },
+ {
+ "epoch": 0.8892501015910725,
+ "grad_norm": 0.6477749943733215,
+ "learning_rate": 0.00015963174415066468,
+ "loss": 1.5628,
+ "step": 889
+ },
+ {
+ "epoch": 0.8902503829201963,
+ "grad_norm": 0.6205973029136658,
+ "learning_rate": 0.0001595475636065483,
+ "loss": 1.4902,
+ "step": 890
+ },
+ {
+ "epoch": 0.8912506642493201,
+ "grad_norm": 0.45717301964759827,
+ "learning_rate": 0.00015946331763042867,
+ "loss": 1.1998,
+ "step": 891
+ },
+ {
+ "epoch": 0.892250945578444,
+ "grad_norm": 0.5279855132102966,
+ "learning_rate": 0.00015937900631487686,
+ "loss": 1.0668,
+ "step": 892
+ },
+ {
+ "epoch": 0.8932512269075678,
+ "grad_norm": 0.5207269787788391,
+ "learning_rate": 0.00015929462975253585,
+ "loss": 1.2774,
+ "step": 893
+ },
+ {
+ "epoch": 0.8942515082366915,
+ "grad_norm": 0.5200834274291992,
+ "learning_rate": 0.00015921018803612014,
+ "loss": 1.4316,
+ "step": 894
+ },
+ {
+ "epoch": 0.8952517895658154,
+ "grad_norm": 0.48317649960517883,
+ "learning_rate": 0.0001591256812584159,
+ "loss": 1.4101,
+ "step": 895
+ },
+ {
+ "epoch": 0.8962520708949392,
+ "grad_norm": 0.475483775138855,
+ "learning_rate": 0.00015904110951228082,
+ "loss": 1.2011,
+ "step": 896
+ },
+ {
+ "epoch": 0.897252352224063,
+ "grad_norm": 0.6542660593986511,
+ "learning_rate": 0.00015895647289064396,
+ "loss": 1.56,
+ "step": 897
+ },
+ {
+ "epoch": 0.8982526335531869,
+ "grad_norm": 0.5154829621315002,
+ "learning_rate": 0.00015887177148650564,
+ "loss": 1.3748,
+ "step": 898
+ },
+ {
+ "epoch": 0.8992529148823106,
+ "grad_norm": 0.5744799375534058,
+ "learning_rate": 0.0001587870053929374,
+ "loss": 1.4072,
+ "step": 899
+ },
+ {
+ "epoch": 0.9002531962114345,
+ "grad_norm": 0.4835909307003021,
+ "learning_rate": 0.00015870217470308188,
+ "loss": 1.3037,
+ "step": 900
+ },
+ {
+ "epoch": 0.9012534775405583,
+ "grad_norm": 0.5292366743087769,
+ "learning_rate": 0.0001586172795101526,
+ "loss": 1.2395,
+ "step": 901
+ },
+ {
+ "epoch": 0.9022537588696821,
+ "grad_norm": 0.5905430912971497,
+ "learning_rate": 0.00015853231990743406,
+ "loss": 1.29,
+ "step": 902
+ },
+ {
+ "epoch": 0.903254040198806,
+ "grad_norm": 0.4918007254600525,
+ "learning_rate": 0.0001584472959882815,
+ "loss": 1.2593,
+ "step": 903
+ },
+ {
+ "epoch": 0.9042543215279297,
+ "grad_norm": 0.4735652208328247,
+ "learning_rate": 0.00015836220784612085,
+ "loss": 1.1669,
+ "step": 904
+ },
+ {
+ "epoch": 0.9052546028570535,
+ "grad_norm": 0.6272550821304321,
+ "learning_rate": 0.00015827705557444852,
+ "loss": 1.3692,
+ "step": 905
+ },
+ {
+ "epoch": 0.9062548841861774,
+ "grad_norm": 0.5333564877510071,
+ "learning_rate": 0.00015819183926683153,
+ "loss": 1.3672,
+ "step": 906
+ },
+ {
+ "epoch": 0.9072551655153012,
+ "grad_norm": 0.44029948115348816,
+ "learning_rate": 0.00015810655901690715,
+ "loss": 1.2124,
+ "step": 907
+ },
+ {
+ "epoch": 0.9082554468444249,
+ "grad_norm": 0.5636379718780518,
+ "learning_rate": 0.00015802121491838297,
+ "loss": 1.3507,
+ "step": 908
+ },
+ {
+ "epoch": 0.9092557281735488,
+ "grad_norm": 0.4394778907299042,
+ "learning_rate": 0.0001579358070650367,
+ "loss": 1.3159,
+ "step": 909
+ },
+ {
+ "epoch": 0.9102560095026726,
+ "grad_norm": 0.5382723212242126,
+ "learning_rate": 0.00015785033555071616,
+ "loss": 1.3733,
+ "step": 910
+ },
+ {
+ "epoch": 0.9112562908317965,
+ "grad_norm": 0.5251659750938416,
+ "learning_rate": 0.00015776480046933905,
+ "loss": 1.2253,
+ "step": 911
+ },
+ {
+ "epoch": 0.9122565721609203,
+ "grad_norm": 0.4791383743286133,
+ "learning_rate": 0.000157679201914893,
+ "loss": 1.2341,
+ "step": 912
+ },
+ {
+ "epoch": 0.913256853490044,
+ "grad_norm": 0.5058613419532776,
+ "learning_rate": 0.00015759353998143528,
+ "loss": 1.2717,
+ "step": 913
+ },
+ {
+ "epoch": 0.9142571348191679,
+ "grad_norm": 0.46837320923805237,
+ "learning_rate": 0.00015750781476309288,
+ "loss": 1.2484,
+ "step": 914
+ },
+ {
+ "epoch": 0.9152574161482917,
+ "grad_norm": 0.524444580078125,
+ "learning_rate": 0.00015742202635406235,
+ "loss": 1.5512,
+ "step": 915
+ },
+ {
+ "epoch": 0.9162576974774155,
+ "grad_norm": 0.6169744729995728,
+ "learning_rate": 0.00015733617484860963,
+ "loss": 1.271,
+ "step": 916
+ },
+ {
+ "epoch": 0.9172579788065394,
+ "grad_norm": 0.48883670568466187,
+ "learning_rate": 0.00015725026034106996,
+ "loss": 1.4779,
+ "step": 917
+ },
+ {
+ "epoch": 0.9182582601356631,
+ "grad_norm": 0.5408645272254944,
+ "learning_rate": 0.00015716428292584787,
+ "loss": 1.3574,
+ "step": 918
+ },
+ {
+ "epoch": 0.919258541464787,
+ "grad_norm": 0.5622221231460571,
+ "learning_rate": 0.00015707824269741702,
+ "loss": 1.2146,
+ "step": 919
+ },
+ {
+ "epoch": 0.9202588227939108,
+ "grad_norm": 0.477328896522522,
+ "learning_rate": 0.00015699213975031996,
+ "loss": 1.162,
+ "step": 920
+ },
+ {
+ "epoch": 0.9212591041230346,
+ "grad_norm": 0.503027081489563,
+ "learning_rate": 0.0001569059741791684,
+ "loss": 1.1674,
+ "step": 921
+ },
+ {
+ "epoch": 0.9222593854521585,
+ "grad_norm": 0.5951637625694275,
+ "learning_rate": 0.0001568197460786426,
+ "loss": 1.3737,
+ "step": 922
+ },
+ {
+ "epoch": 0.9232596667812822,
+ "grad_norm": 0.5276626348495483,
+ "learning_rate": 0.0001567334555434917,
+ "loss": 1.2494,
+ "step": 923
+ },
+ {
+ "epoch": 0.924259948110406,
+ "grad_norm": 0.6354761123657227,
+ "learning_rate": 0.0001566471026685334,
+ "loss": 1.2052,
+ "step": 924
+ },
+ {
+ "epoch": 0.9252602294395299,
+ "grad_norm": 0.4227287471294403,
+ "learning_rate": 0.00015656068754865387,
+ "loss": 1.1446,
+ "step": 925
+ },
+ {
+ "epoch": 0.9262605107686537,
+ "grad_norm": 0.5290839076042175,
+ "learning_rate": 0.00015647421027880772,
+ "loss": 1.2057,
+ "step": 926
+ },
+ {
+ "epoch": 0.9272607920977775,
+ "grad_norm": 0.4961225986480713,
+ "learning_rate": 0.0001563876709540178,
+ "loss": 1.2788,
+ "step": 927
+ },
+ {
+ "epoch": 0.9282610734269013,
+ "grad_norm": 0.5095213651657104,
+ "learning_rate": 0.0001563010696693752,
+ "loss": 1.2751,
+ "step": 928
+ },
+ {
+ "epoch": 0.9292613547560251,
+ "grad_norm": 0.5027223825454712,
+ "learning_rate": 0.00015621440652003907,
+ "loss": 1.3653,
+ "step": 929
+ },
+ {
+ "epoch": 0.930261636085149,
+ "grad_norm": 0.49251896142959595,
+ "learning_rate": 0.00015612768160123652,
+ "loss": 1.1556,
+ "step": 930
+ },
+ {
+ "epoch": 0.9312619174142728,
+ "grad_norm": 0.5187139511108398,
+ "learning_rate": 0.00015604089500826257,
+ "loss": 1.3623,
+ "step": 931
+ },
+ {
+ "epoch": 0.9322621987433966,
+ "grad_norm": 0.5004428029060364,
+ "learning_rate": 0.00015595404683648,
+ "loss": 1.185,
+ "step": 932
+ },
+ {
+ "epoch": 0.9332624800725204,
+ "grad_norm": 0.5750531554222107,
+ "learning_rate": 0.00015586713718131922,
+ "loss": 1.2999,
+ "step": 933
+ },
+ {
+ "epoch": 0.9342627614016442,
+ "grad_norm": 0.482732355594635,
+ "learning_rate": 0.0001557801661382782,
+ "loss": 1.2635,
+ "step": 934
+ },
+ {
+ "epoch": 0.935263042730768,
+ "grad_norm": 0.47854143381118774,
+ "learning_rate": 0.00015569313380292248,
+ "loss": 1.2833,
+ "step": 935
+ },
+ {
+ "epoch": 0.9362633240598919,
+ "grad_norm": 0.49382665753364563,
+ "learning_rate": 0.00015560604027088477,
+ "loss": 1.2327,
+ "step": 936
+ },
+ {
+ "epoch": 0.9372636053890157,
+ "grad_norm": 0.5009885430335999,
+ "learning_rate": 0.00015551888563786515,
+ "loss": 1.2967,
+ "step": 937
+ },
+ {
+ "epoch": 0.9382638867181394,
+ "grad_norm": 0.5012707114219666,
+ "learning_rate": 0.00015543166999963076,
+ "loss": 1.3231,
+ "step": 938
+ },
+ {
+ "epoch": 0.9392641680472633,
+ "grad_norm": 0.6908506751060486,
+ "learning_rate": 0.0001553443934520159,
+ "loss": 1.4055,
+ "step": 939
+ },
+ {
+ "epoch": 0.9402644493763871,
+ "grad_norm": 0.7104817032814026,
+ "learning_rate": 0.00015525705609092157,
+ "loss": 1.3435,
+ "step": 940
+ },
+ {
+ "epoch": 0.941264730705511,
+ "grad_norm": 0.49263522028923035,
+ "learning_rate": 0.00015516965801231586,
+ "loss": 1.2259,
+ "step": 941
+ },
+ {
+ "epoch": 0.9422650120346348,
+ "grad_norm": 0.5337693691253662,
+ "learning_rate": 0.0001550821993122334,
+ "loss": 1.2863,
+ "step": 942
+ },
+ {
+ "epoch": 0.9432652933637585,
+ "grad_norm": 0.5506749153137207,
+ "learning_rate": 0.0001549946800867755,
+ "loss": 1.4061,
+ "step": 943
+ },
+ {
+ "epoch": 0.9442655746928824,
+ "grad_norm": 0.5121364593505859,
+ "learning_rate": 0.00015490710043210997,
+ "loss": 1.3567,
+ "step": 944
+ },
+ {
+ "epoch": 0.9452658560220062,
+ "grad_norm": 0.5326678156852722,
+ "learning_rate": 0.00015481946044447099,
+ "loss": 1.2719,
+ "step": 945
+ },
+ {
+ "epoch": 0.94626613735113,
+ "grad_norm": 0.6023722290992737,
+ "learning_rate": 0.00015473176022015906,
+ "loss": 1.1512,
+ "step": 946
+ },
+ {
+ "epoch": 0.9472664186802539,
+ "grad_norm": 0.4953387975692749,
+ "learning_rate": 0.0001546439998555409,
+ "loss": 1.556,
+ "step": 947
+ },
+ {
+ "epoch": 0.9482667000093776,
+ "grad_norm": 0.5187799334526062,
+ "learning_rate": 0.0001545561794470492,
+ "loss": 1.279,
+ "step": 948
+ },
+ {
+ "epoch": 0.9492669813385014,
+ "grad_norm": 0.5788894295692444,
+ "learning_rate": 0.00015446829909118275,
+ "loss": 1.3246,
+ "step": 949
+ },
+ {
+ "epoch": 0.9502672626676253,
+ "grad_norm": 0.5551681518554688,
+ "learning_rate": 0.00015438035888450623,
+ "loss": 1.2231,
+ "step": 950
+ },
+ {
+ "epoch": 0.9512675439967491,
+ "grad_norm": 0.4898390471935272,
+ "learning_rate": 0.00015429235892364994,
+ "loss": 1.2036,
+ "step": 951
+ },
+ {
+ "epoch": 0.952267825325873,
+ "grad_norm": 0.5427507162094116,
+ "learning_rate": 0.00015420429930530996,
+ "loss": 1.3614,
+ "step": 952
+ },
+ {
+ "epoch": 0.9532681066549967,
+ "grad_norm": 0.557054340839386,
+ "learning_rate": 0.00015411618012624786,
+ "loss": 1.4249,
+ "step": 953
+ },
+ {
+ "epoch": 0.9542683879841205,
+ "grad_norm": 0.5793543457984924,
+ "learning_rate": 0.00015402800148329071,
+ "loss": 1.4341,
+ "step": 954
+ },
+ {
+ "epoch": 0.9552686693132444,
+ "grad_norm": 0.5993456244468689,
+ "learning_rate": 0.00015393976347333088,
+ "loss": 1.0259,
+ "step": 955
+ },
+ {
+ "epoch": 0.9562689506423682,
+ "grad_norm": 0.554904580116272,
+ "learning_rate": 0.00015385146619332596,
+ "loss": 1.3558,
+ "step": 956
+ },
+ {
+ "epoch": 0.9572692319714919,
+ "grad_norm": 0.5488478541374207,
+ "learning_rate": 0.00015376310974029873,
+ "loss": 1.358,
+ "step": 957
+ },
+ {
+ "epoch": 0.9582695133006158,
+ "grad_norm": 0.5108879208564758,
+ "learning_rate": 0.00015367469421133695,
+ "loss": 1.3865,
+ "step": 958
+ },
+ {
+ "epoch": 0.9592697946297396,
+ "grad_norm": 0.4606814682483673,
+ "learning_rate": 0.00015358621970359325,
+ "loss": 1.2055,
+ "step": 959
+ },
+ {
+ "epoch": 0.9602700759588634,
+ "grad_norm": 0.4974004328250885,
+ "learning_rate": 0.00015349768631428519,
+ "loss": 1.2541,
+ "step": 960
+ },
+ {
+ "epoch": 0.9612703572879873,
+ "grad_norm": 0.5107241272926331,
+ "learning_rate": 0.00015340909414069488,
+ "loss": 1.1624,
+ "step": 961
+ },
+ {
+ "epoch": 0.962270638617111,
+ "grad_norm": 0.5587212443351746,
+ "learning_rate": 0.00015332044328016914,
+ "loss": 1.349,
+ "step": 962
+ },
+ {
+ "epoch": 0.9632709199462349,
+ "grad_norm": 0.5209497809410095,
+ "learning_rate": 0.0001532317338301192,
+ "loss": 1.3695,
+ "step": 963
+ },
+ {
+ "epoch": 0.9642712012753587,
+ "grad_norm": 0.4985620677471161,
+ "learning_rate": 0.00015314296588802076,
+ "loss": 1.4597,
+ "step": 964
+ },
+ {
+ "epoch": 0.9652714826044825,
+ "grad_norm": 0.5065100789070129,
+ "learning_rate": 0.00015305413955141365,
+ "loss": 1.4225,
+ "step": 965
+ },
+ {
+ "epoch": 0.9662717639336064,
+ "grad_norm": 0.5079792737960815,
+ "learning_rate": 0.00015296525491790205,
+ "loss": 1.057,
+ "step": 966
+ },
+ {
+ "epoch": 0.9672720452627301,
+ "grad_norm": 0.4673600196838379,
+ "learning_rate": 0.00015287631208515406,
+ "loss": 1.2531,
+ "step": 967
+ },
+ {
+ "epoch": 0.9682723265918539,
+ "grad_norm": 0.5309945344924927,
+ "learning_rate": 0.00015278731115090171,
+ "loss": 1.374,
+ "step": 968
+ },
+ {
+ "epoch": 0.9692726079209778,
+ "grad_norm": 0.4792092442512512,
+ "learning_rate": 0.00015269825221294098,
+ "loss": 1.3018,
+ "step": 969
+ },
+ {
+ "epoch": 0.9702728892501016,
+ "grad_norm": 0.5222868323326111,
+ "learning_rate": 0.00015260913536913154,
+ "loss": 1.4063,
+ "step": 970
+ },
+ {
+ "epoch": 0.9712731705792254,
+ "grad_norm": 0.5373417139053345,
+ "learning_rate": 0.00015251996071739664,
+ "loss": 1.2183,
+ "step": 971
+ },
+ {
+ "epoch": 0.9722734519083492,
+ "grad_norm": 0.5624721050262451,
+ "learning_rate": 0.00015243072835572318,
+ "loss": 1.2696,
+ "step": 972
+ },
+ {
+ "epoch": 0.973273733237473,
+ "grad_norm": 0.46938082575798035,
+ "learning_rate": 0.0001523414383821613,
+ "loss": 1.3544,
+ "step": 973
+ },
+ {
+ "epoch": 0.9742740145665969,
+ "grad_norm": 0.45348694920539856,
+ "learning_rate": 0.00015225209089482462,
+ "loss": 1.2078,
+ "step": 974
+ },
+ {
+ "epoch": 0.9752742958957207,
+ "grad_norm": 0.48000606894493103,
+ "learning_rate": 0.0001521626859918898,
+ "loss": 1.1914,
+ "step": 975
+ },
+ {
+ "epoch": 0.9762745772248445,
+ "grad_norm": 0.5106796622276306,
+ "learning_rate": 0.00015207322377159668,
+ "loss": 1.3249,
+ "step": 976
+ },
+ {
+ "epoch": 0.9772748585539683,
+ "grad_norm": 0.49865373969078064,
+ "learning_rate": 0.00015198370433224805,
+ "loss": 1.2876,
+ "step": 977
+ },
+ {
+ "epoch": 0.9782751398830921,
+ "grad_norm": 0.5271755456924438,
+ "learning_rate": 0.00015189412777220958,
+ "loss": 1.3049,
+ "step": 978
+ },
+ {
+ "epoch": 0.9792754212122159,
+ "grad_norm": 0.49824708700180054,
+ "learning_rate": 0.00015180449418990976,
+ "loss": 1.1614,
+ "step": 979
+ },
+ {
+ "epoch": 0.9802757025413398,
+ "grad_norm": 0.7327549457550049,
+ "learning_rate": 0.00015171480368383964,
+ "loss": 1.2923,
+ "step": 980
+ },
+ {
+ "epoch": 0.9812759838704636,
+ "grad_norm": 0.5170425176620483,
+ "learning_rate": 0.00015162505635255287,
+ "loss": 1.3097,
+ "step": 981
+ },
+ {
+ "epoch": 0.9822762651995874,
+ "grad_norm": 0.47041526436805725,
+ "learning_rate": 0.00015153525229466555,
+ "loss": 1.3508,
+ "step": 982
+ },
+ {
+ "epoch": 0.9832765465287112,
+ "grad_norm": 0.4670693278312683,
+ "learning_rate": 0.00015144539160885613,
+ "loss": 1.3974,
+ "step": 983
+ },
+ {
+ "epoch": 0.984276827857835,
+ "grad_norm": 0.5745754837989807,
+ "learning_rate": 0.00015135547439386516,
+ "loss": 1.2977,
+ "step": 984
+ },
+ {
+ "epoch": 0.9852771091869589,
+ "grad_norm": 0.5845474004745483,
+ "learning_rate": 0.0001512655007484955,
+ "loss": 1.3384,
+ "step": 985
+ },
+ {
+ "epoch": 0.9862773905160827,
+ "grad_norm": 0.5627439618110657,
+ "learning_rate": 0.00015117547077161185,
+ "loss": 1.1756,
+ "step": 986
+ },
+ {
+ "epoch": 0.9872776718452064,
+ "grad_norm": 0.6411226987838745,
+ "learning_rate": 0.0001510853845621409,
+ "loss": 1.3441,
+ "step": 987
+ },
+ {
+ "epoch": 0.9882779531743303,
+ "grad_norm": 0.545659601688385,
+ "learning_rate": 0.00015099524221907107,
+ "loss": 1.3766,
+ "step": 988
+ },
+ {
+ "epoch": 0.9892782345034541,
+ "grad_norm": 0.5058498382568359,
+ "learning_rate": 0.0001509050438414525,
+ "loss": 1.3171,
+ "step": 989
+ },
+ {
+ "epoch": 0.9902785158325779,
+ "grad_norm": 0.6247567534446716,
+ "learning_rate": 0.00015081478952839693,
+ "loss": 1.2141,
+ "step": 990
+ },
+ {
+ "epoch": 0.9912787971617018,
+ "grad_norm": 0.5492308139801025,
+ "learning_rate": 0.00015072447937907753,
+ "loss": 1.1626,
+ "step": 991
+ },
+ {
+ "epoch": 0.9922790784908255,
+ "grad_norm": 0.4795534908771515,
+ "learning_rate": 0.00015063411349272877,
+ "loss": 1.218,
+ "step": 992
+ },
+ {
+ "epoch": 0.9932793598199494,
+ "grad_norm": 0.5527793169021606,
+ "learning_rate": 0.00015054369196864644,
+ "loss": 1.3816,
+ "step": 993
+ },
+ {
+ "epoch": 0.9942796411490732,
+ "grad_norm": 0.5297475457191467,
+ "learning_rate": 0.00015045321490618748,
+ "loss": 1.2515,
+ "step": 994
+ },
+ {
+ "epoch": 0.995279922478197,
+ "grad_norm": 0.518803596496582,
+ "learning_rate": 0.00015036268240476978,
+ "loss": 1.3631,
+ "step": 995
+ },
+ {
+ "epoch": 0.9962802038073209,
+ "grad_norm": 0.47196391224861145,
+ "learning_rate": 0.00015027209456387218,
+ "loss": 1.0932,
+ "step": 996
+ },
+ {
+ "epoch": 0.9972804851364446,
+ "grad_norm": 0.5369086861610413,
+ "learning_rate": 0.00015018145148303438,
+ "loss": 1.1181,
+ "step": 997
+ },
+ {
+ "epoch": 0.9982807664655684,
+ "grad_norm": 0.5940788388252258,
+ "learning_rate": 0.00015009075326185667,
+ "loss": 1.561,
+ "step": 998
+ },
+ {
+ "epoch": 0.9992810477946923,
+ "grad_norm": 0.5340734124183655,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 1.2909,
+ "step": 999
+ },
+ {
+ "epoch": 1.0002813291238162,
+ "grad_norm": 0.5133704543113708,
+ "learning_rate": 0.00014990919179718584,
+ "loss": 1.0441,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0012816104529398,
+ "grad_norm": 0.3812060058116913,
+ "learning_rate": 0.00014981832875319597,
+ "loss": 0.8215,
+ "step": 1001
+ },
+ {
+ "epoch": 1.0022818917820637,
+ "grad_norm": 0.40786364674568176,
+ "learning_rate": 0.00014972741096787242,
+ "loss": 0.8215,
+ "step": 1002
+ },
+ {
+ "epoch": 1.0032821731111876,
+ "grad_norm": 0.4328629672527313,
+ "learning_rate": 0.0001496364385411174,
+ "loss": 0.9506,
+ "step": 1003
+ },
+ {
+ "epoch": 1.0042824544403113,
+ "grad_norm": 0.4680945873260498,
+ "learning_rate": 0.0001495454115728932,
+ "loss": 0.8443,
+ "step": 1004
+ },
+ {
+ "epoch": 1.0052827357694352,
+ "grad_norm": 0.48512670397758484,
+ "learning_rate": 0.0001494543301632219,
+ "loss": 1.1143,
+ "step": 1005
+ },
+ {
+ "epoch": 1.006283017098559,
+ "grad_norm": 0.43949049711227417,
+ "learning_rate": 0.00014936319441218555,
+ "loss": 1.0257,
+ "step": 1006
+ },
+ {
+ "epoch": 1.0072832984276827,
+ "grad_norm": 0.5564325451850891,
+ "learning_rate": 0.0001492720044199259,
+ "loss": 0.967,
+ "step": 1007
+ },
+ {
+ "epoch": 1.0082835797568066,
+ "grad_norm": 0.47199952602386475,
+ "learning_rate": 0.0001491807602866442,
+ "loss": 1.0317,
+ "step": 1008
+ },
+ {
+ "epoch": 1.0092838610859305,
+ "grad_norm": 0.4625256657600403,
+ "learning_rate": 0.00014908946211260123,
+ "loss": 0.894,
+ "step": 1009
+ },
+ {
+ "epoch": 1.0102841424150542,
+ "grad_norm": 0.5081682801246643,
+ "learning_rate": 0.00014899810999811726,
+ "loss": 0.9647,
+ "step": 1010
+ },
+ {
+ "epoch": 1.011284423744178,
+ "grad_norm": 0.5240431427955627,
+ "learning_rate": 0.0001489067040435717,
+ "loss": 1.1076,
+ "step": 1011
+ },
+ {
+ "epoch": 1.012284705073302,
+ "grad_norm": 0.5996805429458618,
+ "learning_rate": 0.00014881524434940313,
+ "loss": 0.9063,
+ "step": 1012
+ },
+ {
+ "epoch": 1.0132849864024256,
+ "grad_norm": 0.4602286219596863,
+ "learning_rate": 0.0001487237310161093,
+ "loss": 0.8003,
+ "step": 1013
+ },
+ {
+ "epoch": 1.0142852677315495,
+ "grad_norm": 0.5298121571540833,
+ "learning_rate": 0.0001486321641442467,
+ "loss": 0.9616,
+ "step": 1014
+ },
+ {
+ "epoch": 1.0152855490606734,
+ "grad_norm": 0.47525477409362793,
+ "learning_rate": 0.00014854054383443081,
+ "loss": 1.0457,
+ "step": 1015
+ },
+ {
+ "epoch": 1.016285830389797,
+ "grad_norm": 0.5577285885810852,
+ "learning_rate": 0.00014844887018733582,
+ "loss": 0.8973,
+ "step": 1016
+ },
+ {
+ "epoch": 1.017286111718921,
+ "grad_norm": 0.5028079748153687,
+ "learning_rate": 0.00014835714330369446,
+ "loss": 1.0721,
+ "step": 1017
+ },
+ {
+ "epoch": 1.0182863930480448,
+ "grad_norm": 0.5401796102523804,
+ "learning_rate": 0.00014826536328429795,
+ "loss": 0.9595,
+ "step": 1018
+ },
+ {
+ "epoch": 1.0192866743771685,
+ "grad_norm": 0.4957962930202484,
+ "learning_rate": 0.000148173530229996,
+ "loss": 0.9871,
+ "step": 1019
+ },
+ {
+ "epoch": 1.0202869557062924,
+ "grad_norm": 0.4891825020313263,
+ "learning_rate": 0.00014808164424169647,
+ "loss": 0.9546,
+ "step": 1020
+ },
+ {
+ "epoch": 1.0212872370354162,
+ "grad_norm": 0.48703211545944214,
+ "learning_rate": 0.0001479897054203655,
+ "loss": 0.8863,
+ "step": 1021
+ },
+ {
+ "epoch": 1.0222875183645401,
+ "grad_norm": 0.5614656805992126,
+ "learning_rate": 0.00014789771386702717,
+ "loss": 0.9857,
+ "step": 1022
+ },
+ {
+ "epoch": 1.0232877996936638,
+ "grad_norm": 0.5903550982475281,
+ "learning_rate": 0.0001478056696827636,
+ "loss": 0.8347,
+ "step": 1023
+ },
+ {
+ "epoch": 1.0242880810227877,
+ "grad_norm": 0.47974926233291626,
+ "learning_rate": 0.0001477135729687147,
+ "loss": 1.0035,
+ "step": 1024
+ },
+ {
+ "epoch": 1.0252883623519116,
+ "grad_norm": 0.5049344897270203,
+ "learning_rate": 0.0001476214238260781,
+ "loss": 0.953,
+ "step": 1025
+ },
+ {
+ "epoch": 1.0262886436810352,
+ "grad_norm": 0.3981640636920929,
+ "learning_rate": 0.000147529222356109,
+ "loss": 0.7118,
+ "step": 1026
+ },
+ {
+ "epoch": 1.0272889250101591,
+ "grad_norm": 0.598785400390625,
+ "learning_rate": 0.0001474369686601202,
+ "loss": 0.9002,
+ "step": 1027
+ },
+ {
+ "epoch": 1.028289206339283,
+ "grad_norm": 0.5422918200492859,
+ "learning_rate": 0.0001473446628394818,
+ "loss": 1.192,
+ "step": 1028
+ },
+ {
+ "epoch": 1.0292894876684067,
+ "grad_norm": 0.592509925365448,
+ "learning_rate": 0.00014725230499562119,
+ "loss": 1.0989,
+ "step": 1029
+ },
+ {
+ "epoch": 1.0302897689975306,
+ "grad_norm": 0.5232793688774109,
+ "learning_rate": 0.00014715989523002296,
+ "loss": 1.0667,
+ "step": 1030
+ },
+ {
+ "epoch": 1.0312900503266544,
+ "grad_norm": 0.5362406373023987,
+ "learning_rate": 0.00014706743364422878,
+ "loss": 0.8933,
+ "step": 1031
+ },
+ {
+ "epoch": 1.032290331655778,
+ "grad_norm": 0.43486225605010986,
+ "learning_rate": 0.00014697492033983707,
+ "loss": 0.8525,
+ "step": 1032
+ },
+ {
+ "epoch": 1.033290612984902,
+ "grad_norm": 0.5187330842018127,
+ "learning_rate": 0.00014688235541850337,
+ "loss": 1.017,
+ "step": 1033
+ },
+ {
+ "epoch": 1.0342908943140259,
+ "grad_norm": 0.5081651210784912,
+ "learning_rate": 0.0001467897389819397,
+ "loss": 1.0135,
+ "step": 1034
+ },
+ {
+ "epoch": 1.0352911756431495,
+ "grad_norm": 0.49661391973495483,
+ "learning_rate": 0.00014669707113191483,
+ "loss": 0.8711,
+ "step": 1035
+ },
+ {
+ "epoch": 1.0362914569722734,
+ "grad_norm": 0.4899054169654846,
+ "learning_rate": 0.0001466043519702539,
+ "loss": 0.9924,
+ "step": 1036
+ },
+ {
+ "epoch": 1.0372917383013973,
+ "grad_norm": 0.47787439823150635,
+ "learning_rate": 0.00014651158159883855,
+ "loss": 0.9238,
+ "step": 1037
+ },
+ {
+ "epoch": 1.038292019630521,
+ "grad_norm": 0.509600818157196,
+ "learning_rate": 0.0001464187601196066,
+ "loss": 0.8854,
+ "step": 1038
+ },
+ {
+ "epoch": 1.0392923009596449,
+ "grad_norm": 0.3907245397567749,
+ "learning_rate": 0.00014632588763455212,
+ "loss": 0.8911,
+ "step": 1039
+ },
+ {
+ "epoch": 1.0402925822887688,
+ "grad_norm": 0.4939952492713928,
+ "learning_rate": 0.00014623296424572517,
+ "loss": 0.9069,
+ "step": 1040
+ },
+ {
+ "epoch": 1.0412928636178926,
+ "grad_norm": 0.4680919945240021,
+ "learning_rate": 0.00014613999005523174,
+ "loss": 0.9361,
+ "step": 1041
+ },
+ {
+ "epoch": 1.0422931449470163,
+ "grad_norm": 0.4871543347835541,
+ "learning_rate": 0.00014604696516523361,
+ "loss": 0.9268,
+ "step": 1042
+ },
+ {
+ "epoch": 1.0432934262761402,
+ "grad_norm": 0.5115481615066528,
+ "learning_rate": 0.00014595388967794835,
+ "loss": 0.9555,
+ "step": 1043
+ },
+ {
+ "epoch": 1.044293707605264,
+ "grad_norm": 0.5923699140548706,
+ "learning_rate": 0.00014586076369564908,
+ "loss": 1.0122,
+ "step": 1044
+ },
+ {
+ "epoch": 1.0452939889343877,
+ "grad_norm": 0.491161048412323,
+ "learning_rate": 0.00014576758732066442,
+ "loss": 0.9805,
+ "step": 1045
+ },
+ {
+ "epoch": 1.0462942702635116,
+ "grad_norm": 0.462168425321579,
+ "learning_rate": 0.00014567436065537835,
+ "loss": 0.9213,
+ "step": 1046
+ },
+ {
+ "epoch": 1.0472945515926355,
+ "grad_norm": 0.5082408785820007,
+ "learning_rate": 0.00014558108380223012,
+ "loss": 0.9073,
+ "step": 1047
+ },
+ {
+ "epoch": 1.0482948329217592,
+ "grad_norm": 0.6131752133369446,
+ "learning_rate": 0.00014548775686371412,
+ "loss": 0.9156,
+ "step": 1048
+ },
+ {
+ "epoch": 1.049295114250883,
+ "grad_norm": 0.6133660674095154,
+ "learning_rate": 0.00014539437994237977,
+ "loss": 1.2011,
+ "step": 1049
+ },
+ {
+ "epoch": 1.050295395580007,
+ "grad_norm": 0.542412519454956,
+ "learning_rate": 0.00014530095314083143,
+ "loss": 1.1075,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0512956769091306,
+ "grad_norm": 0.5367622971534729,
+ "learning_rate": 0.00014520747656172824,
+ "loss": 1.0783,
+ "step": 1051
+ },
+ {
+ "epoch": 1.0522959582382545,
+ "grad_norm": 0.5243119597434998,
+ "learning_rate": 0.00014511395030778406,
+ "loss": 1.0865,
+ "step": 1052
+ },
+ {
+ "epoch": 1.0532962395673784,
+ "grad_norm": 0.5611020922660828,
+ "learning_rate": 0.00014502037448176734,
+ "loss": 0.9613,
+ "step": 1053
+ },
+ {
+ "epoch": 1.054296520896502,
+ "grad_norm": 0.506432294845581,
+ "learning_rate": 0.000144926749186501,
+ "loss": 1.1364,
+ "step": 1054
+ },
+ {
+ "epoch": 1.055296802225626,
+ "grad_norm": 0.5270103812217712,
+ "learning_rate": 0.00014483307452486227,
+ "loss": 1.042,
+ "step": 1055
+ },
+ {
+ "epoch": 1.0562970835547498,
+ "grad_norm": 0.5376967191696167,
+ "learning_rate": 0.0001447393505997827,
+ "loss": 0.9563,
+ "step": 1056
+ },
+ {
+ "epoch": 1.0572973648838735,
+ "grad_norm": 0.4821127653121948,
+ "learning_rate": 0.00014464557751424793,
+ "loss": 0.9241,
+ "step": 1057
+ },
+ {
+ "epoch": 1.0582976462129974,
+ "grad_norm": 0.6197866201400757,
+ "learning_rate": 0.00014455175537129758,
+ "loss": 1.0489,
+ "step": 1058
+ },
+ {
+ "epoch": 1.0592979275421213,
+ "grad_norm": 0.42820343375205994,
+ "learning_rate": 0.00014445788427402528,
+ "loss": 0.7755,
+ "step": 1059
+ },
+ {
+ "epoch": 1.0602982088712452,
+ "grad_norm": 0.49635690450668335,
+ "learning_rate": 0.00014436396432557835,
+ "loss": 0.8485,
+ "step": 1060
+ },
+ {
+ "epoch": 1.0612984902003688,
+ "grad_norm": 0.5529823899269104,
+ "learning_rate": 0.00014426999562915782,
+ "loss": 0.9589,
+ "step": 1061
+ },
+ {
+ "epoch": 1.0622987715294927,
+ "grad_norm": 0.5504932403564453,
+ "learning_rate": 0.00014417597828801832,
+ "loss": 0.9048,
+ "step": 1062
+ },
+ {
+ "epoch": 1.0632990528586166,
+ "grad_norm": 0.5755835175514221,
+ "learning_rate": 0.0001440819124054679,
+ "loss": 0.9542,
+ "step": 1063
+ },
+ {
+ "epoch": 1.0642993341877403,
+ "grad_norm": 0.4767759144306183,
+ "learning_rate": 0.00014398779808486793,
+ "loss": 0.9174,
+ "step": 1064
+ },
+ {
+ "epoch": 1.0652996155168641,
+ "grad_norm": 0.5343469381332397,
+ "learning_rate": 0.00014389363542963306,
+ "loss": 0.8493,
+ "step": 1065
+ },
+ {
+ "epoch": 1.066299896845988,
+ "grad_norm": 0.48161643743515015,
+ "learning_rate": 0.000143799424543231,
+ "loss": 0.8218,
+ "step": 1066
+ },
+ {
+ "epoch": 1.0673001781751117,
+ "grad_norm": 0.4958563446998596,
+ "learning_rate": 0.0001437051655291825,
+ "loss": 0.9849,
+ "step": 1067
+ },
+ {
+ "epoch": 1.0683004595042356,
+ "grad_norm": 0.5286628007888794,
+ "learning_rate": 0.0001436108584910611,
+ "loss": 0.8935,
+ "step": 1068
+ },
+ {
+ "epoch": 1.0693007408333595,
+ "grad_norm": 0.6096596121788025,
+ "learning_rate": 0.0001435165035324933,
+ "loss": 1.0577,
+ "step": 1069
+ },
+ {
+ "epoch": 1.0703010221624831,
+ "grad_norm": 0.4895448088645935,
+ "learning_rate": 0.000143422100757158,
+ "loss": 0.865,
+ "step": 1070
+ },
+ {
+ "epoch": 1.071301303491607,
+ "grad_norm": 0.5186201930046082,
+ "learning_rate": 0.00014332765026878687,
+ "loss": 0.8414,
+ "step": 1071
+ },
+ {
+ "epoch": 1.072301584820731,
+ "grad_norm": 0.5639254450798035,
+ "learning_rate": 0.0001432331521711639,
+ "loss": 0.9401,
+ "step": 1072
+ },
+ {
+ "epoch": 1.0733018661498546,
+ "grad_norm": 0.48865774273872375,
+ "learning_rate": 0.00014313860656812536,
+ "loss": 0.7894,
+ "step": 1073
+ },
+ {
+ "epoch": 1.0743021474789785,
+ "grad_norm": 0.4796544313430786,
+ "learning_rate": 0.00014304401356355983,
+ "loss": 0.8153,
+ "step": 1074
+ },
+ {
+ "epoch": 1.0753024288081023,
+ "grad_norm": 0.5578910708427429,
+ "learning_rate": 0.00014294937326140788,
+ "loss": 1.1675,
+ "step": 1075
+ },
+ {
+ "epoch": 1.076302710137226,
+ "grad_norm": 0.5607575178146362,
+ "learning_rate": 0.00014285468576566207,
+ "loss": 0.9133,
+ "step": 1076
+ },
+ {
+ "epoch": 1.07730299146635,
+ "grad_norm": 0.48808708786964417,
+ "learning_rate": 0.00014275995118036693,
+ "loss": 0.8884,
+ "step": 1077
+ },
+ {
+ "epoch": 1.0783032727954738,
+ "grad_norm": 0.4981604814529419,
+ "learning_rate": 0.00014266516960961852,
+ "loss": 0.9235,
+ "step": 1078
+ },
+ {
+ "epoch": 1.0793035541245974,
+ "grad_norm": 0.6323955655097961,
+ "learning_rate": 0.00014257034115756472,
+ "loss": 1.1617,
+ "step": 1079
+ },
+ {
+ "epoch": 1.0803038354537213,
+ "grad_norm": 0.5465244650840759,
+ "learning_rate": 0.0001424754659284048,
+ "loss": 1.0126,
+ "step": 1080
+ },
+ {
+ "epoch": 1.0813041167828452,
+ "grad_norm": 0.504200279712677,
+ "learning_rate": 0.0001423805440263895,
+ "loss": 1.0069,
+ "step": 1081
+ },
+ {
+ "epoch": 1.0823043981119689,
+ "grad_norm": 0.8698700070381165,
+ "learning_rate": 0.0001422855755558208,
+ "loss": 0.9653,
+ "step": 1082
+ },
+ {
+ "epoch": 1.0833046794410928,
+ "grad_norm": 0.41991496086120605,
+ "learning_rate": 0.00014219056062105193,
+ "loss": 1.089,
+ "step": 1083
+ },
+ {
+ "epoch": 1.0843049607702167,
+ "grad_norm": 0.5334717035293579,
+ "learning_rate": 0.0001420954993264871,
+ "loss": 1.0137,
+ "step": 1084
+ },
+ {
+ "epoch": 1.0853052420993405,
+ "grad_norm": 0.5418859124183655,
+ "learning_rate": 0.00014200039177658145,
+ "loss": 0.9302,
+ "step": 1085
+ },
+ {
+ "epoch": 1.0863055234284642,
+ "grad_norm": 0.515819251537323,
+ "learning_rate": 0.000141905238075841,
+ "loss": 1.0703,
+ "step": 1086
+ },
+ {
+ "epoch": 1.087305804757588,
+ "grad_norm": 0.43046239018440247,
+ "learning_rate": 0.00014181003832882248,
+ "loss": 1.0722,
+ "step": 1087
+ },
+ {
+ "epoch": 1.088306086086712,
+ "grad_norm": 0.6555958390235901,
+ "learning_rate": 0.00014171479264013311,
+ "loss": 0.806,
+ "step": 1088
+ },
+ {
+ "epoch": 1.0893063674158356,
+ "grad_norm": 0.5608332753181458,
+ "learning_rate": 0.00014161950111443077,
+ "loss": 0.9925,
+ "step": 1089
+ },
+ {
+ "epoch": 1.0903066487449595,
+ "grad_norm": 0.5866970419883728,
+ "learning_rate": 0.00014152416385642357,
+ "loss": 0.9278,
+ "step": 1090
+ },
+ {
+ "epoch": 1.0913069300740834,
+ "grad_norm": 0.4913788437843323,
+ "learning_rate": 0.00014142878097086995,
+ "loss": 0.7394,
+ "step": 1091
+ },
+ {
+ "epoch": 1.092307211403207,
+ "grad_norm": 0.4942512512207031,
+ "learning_rate": 0.0001413333525625784,
+ "loss": 0.8891,
+ "step": 1092
+ },
+ {
+ "epoch": 1.093307492732331,
+ "grad_norm": 0.5537131428718567,
+ "learning_rate": 0.00014123787873640754,
+ "loss": 0.9632,
+ "step": 1093
+ },
+ {
+ "epoch": 1.0943077740614549,
+ "grad_norm": 0.49271076917648315,
+ "learning_rate": 0.00014114235959726575,
+ "loss": 0.8708,
+ "step": 1094
+ },
+ {
+ "epoch": 1.0953080553905785,
+ "grad_norm": 0.448188841342926,
+ "learning_rate": 0.0001410467952501114,
+ "loss": 0.9727,
+ "step": 1095
+ },
+ {
+ "epoch": 1.0963083367197024,
+ "grad_norm": 0.4975283741950989,
+ "learning_rate": 0.00014095118579995235,
+ "loss": 0.9971,
+ "step": 1096
+ },
+ {
+ "epoch": 1.0973086180488263,
+ "grad_norm": 0.46382221579551697,
+ "learning_rate": 0.0001408555313518461,
+ "loss": 0.8853,
+ "step": 1097
+ },
+ {
+ "epoch": 1.09830889937795,
+ "grad_norm": 0.5071414113044739,
+ "learning_rate": 0.00014075983201089964,
+ "loss": 0.7723,
+ "step": 1098
+ },
+ {
+ "epoch": 1.0993091807070738,
+ "grad_norm": 0.41700050234794617,
+ "learning_rate": 0.0001406640878822692,
+ "loss": 0.7892,
+ "step": 1099
+ },
+ {
+ "epoch": 1.1003094620361977,
+ "grad_norm": 0.497175395488739,
+ "learning_rate": 0.00014056829907116024,
+ "loss": 0.9791,
+ "step": 1100
+ },
+ {
+ "epoch": 1.1013097433653214,
+ "grad_norm": 0.4512806236743927,
+ "learning_rate": 0.00014047246568282736,
+ "loss": 0.9878,
+ "step": 1101
+ },
+ {
+ "epoch": 1.1023100246944453,
+ "grad_norm": 0.5804361701011658,
+ "learning_rate": 0.00014037658782257414,
+ "loss": 1.1583,
+ "step": 1102
+ },
+ {
+ "epoch": 1.1033103060235692,
+ "grad_norm": 0.5334234237670898,
+ "learning_rate": 0.00014028066559575302,
+ "loss": 1.0705,
+ "step": 1103
+ },
+ {
+ "epoch": 1.104310587352693,
+ "grad_norm": 0.4683452844619751,
+ "learning_rate": 0.00014018469910776513,
+ "loss": 0.8608,
+ "step": 1104
+ },
+ {
+ "epoch": 1.1053108686818167,
+ "grad_norm": 0.5595771074295044,
+ "learning_rate": 0.0001400886884640603,
+ "loss": 1.0804,
+ "step": 1105
+ },
+ {
+ "epoch": 1.1063111500109406,
+ "grad_norm": 0.45048126578330994,
+ "learning_rate": 0.00013999263377013693,
+ "loss": 0.7782,
+ "step": 1106
+ },
+ {
+ "epoch": 1.1073114313400645,
+ "grad_norm": 0.4472745954990387,
+ "learning_rate": 0.00013989653513154165,
+ "loss": 0.8599,
+ "step": 1107
+ },
+ {
+ "epoch": 1.1083117126691882,
+ "grad_norm": 0.5168829560279846,
+ "learning_rate": 0.00013980039265386955,
+ "loss": 0.9984,
+ "step": 1108
+ },
+ {
+ "epoch": 1.109311993998312,
+ "grad_norm": 0.5712297558784485,
+ "learning_rate": 0.00013970420644276383,
+ "loss": 0.957,
+ "step": 1109
+ },
+ {
+ "epoch": 1.110312275327436,
+ "grad_norm": 0.5360589027404785,
+ "learning_rate": 0.0001396079766039157,
+ "loss": 1.0957,
+ "step": 1110
+ },
+ {
+ "epoch": 1.1113125566565596,
+ "grad_norm": 0.49815621972084045,
+ "learning_rate": 0.00013951170324306435,
+ "loss": 1.1143,
+ "step": 1111
+ },
+ {
+ "epoch": 1.1123128379856835,
+ "grad_norm": 0.45044735074043274,
+ "learning_rate": 0.00013941538646599687,
+ "loss": 0.8463,
+ "step": 1112
+ },
+ {
+ "epoch": 1.1133131193148074,
+ "grad_norm": 0.5086628198623657,
+ "learning_rate": 0.0001393190263785479,
+ "loss": 0.9061,
+ "step": 1113
+ },
+ {
+ "epoch": 1.114313400643931,
+ "grad_norm": 0.4669632315635681,
+ "learning_rate": 0.0001392226230865998,
+ "loss": 0.7891,
+ "step": 1114
+ },
+ {
+ "epoch": 1.115313681973055,
+ "grad_norm": 0.43681180477142334,
+ "learning_rate": 0.0001391261766960823,
+ "loss": 0.7687,
+ "step": 1115
+ },
+ {
+ "epoch": 1.1163139633021788,
+ "grad_norm": 0.47354501485824585,
+ "learning_rate": 0.00013902968731297255,
+ "loss": 1.0181,
+ "step": 1116
+ },
+ {
+ "epoch": 1.1173142446313025,
+ "grad_norm": 0.5224591493606567,
+ "learning_rate": 0.00013893315504329498,
+ "loss": 0.9072,
+ "step": 1117
+ },
+ {
+ "epoch": 1.1183145259604264,
+ "grad_norm": 0.5648715496063232,
+ "learning_rate": 0.00013883657999312109,
+ "loss": 1.0256,
+ "step": 1118
+ },
+ {
+ "epoch": 1.1193148072895502,
+ "grad_norm": 0.4603082239627838,
+ "learning_rate": 0.00013873996226856933,
+ "loss": 0.9129,
+ "step": 1119
+ },
+ {
+ "epoch": 1.120315088618674,
+ "grad_norm": 0.48259446024894714,
+ "learning_rate": 0.00013864330197580513,
+ "loss": 0.8335,
+ "step": 1120
+ },
+ {
+ "epoch": 1.1213153699477978,
+ "grad_norm": 0.5239295363426208,
+ "learning_rate": 0.0001385465992210407,
+ "loss": 1.1409,
+ "step": 1121
+ },
+ {
+ "epoch": 1.1223156512769217,
+ "grad_norm": 0.5242553949356079,
+ "learning_rate": 0.00013844985411053492,
+ "loss": 0.9542,
+ "step": 1122
+ },
+ {
+ "epoch": 1.1233159326060456,
+ "grad_norm": 0.5396201014518738,
+ "learning_rate": 0.00013835306675059308,
+ "loss": 1.0786,
+ "step": 1123
+ },
+ {
+ "epoch": 1.1243162139351692,
+ "grad_norm": 1.818426251411438,
+ "learning_rate": 0.00013825623724756704,
+ "loss": 0.9336,
+ "step": 1124
+ },
+ {
+ "epoch": 1.1253164952642931,
+ "grad_norm": 0.5364382863044739,
+ "learning_rate": 0.00013815936570785487,
+ "loss": 0.8096,
+ "step": 1125
+ },
+ {
+ "epoch": 1.1263167765934168,
+ "grad_norm": 0.47344619035720825,
+ "learning_rate": 0.00013806245223790088,
+ "loss": 0.8777,
+ "step": 1126
+ },
+ {
+ "epoch": 1.1273170579225407,
+ "grad_norm": 0.48119789361953735,
+ "learning_rate": 0.0001379654969441955,
+ "loss": 0.9965,
+ "step": 1127
+ },
+ {
+ "epoch": 1.1283173392516646,
+ "grad_norm": 0.5970126390457153,
+ "learning_rate": 0.000137868499933275,
+ "loss": 1.1389,
+ "step": 1128
+ },
+ {
+ "epoch": 1.1293176205807884,
+ "grad_norm": 0.5217893719673157,
+ "learning_rate": 0.00013777146131172162,
+ "loss": 1.1361,
+ "step": 1129
+ },
+ {
+ "epoch": 1.130317901909912,
+ "grad_norm": 0.4322263300418854,
+ "learning_rate": 0.00013767438118616318,
+ "loss": 0.8632,
+ "step": 1130
+ },
+ {
+ "epoch": 1.131318183239036,
+ "grad_norm": 0.49836596846580505,
+ "learning_rate": 0.00013757725966327322,
+ "loss": 0.9594,
+ "step": 1131
+ },
+ {
+ "epoch": 1.1323184645681599,
+ "grad_norm": 0.5220472812652588,
+ "learning_rate": 0.00013748009684977073,
+ "loss": 1.0783,
+ "step": 1132
+ },
+ {
+ "epoch": 1.1333187458972835,
+ "grad_norm": 0.5030301809310913,
+ "learning_rate": 0.0001373828928524201,
+ "loss": 0.9482,
+ "step": 1133
+ },
+ {
+ "epoch": 1.1343190272264074,
+ "grad_norm": 0.5477299094200134,
+ "learning_rate": 0.00013728564777803088,
+ "loss": 1.1119,
+ "step": 1134
+ },
+ {
+ "epoch": 1.1353193085555313,
+ "grad_norm": 0.5505563020706177,
+ "learning_rate": 0.00013718836173345783,
+ "loss": 1.0315,
+ "step": 1135
+ },
+ {
+ "epoch": 1.136319589884655,
+ "grad_norm": 0.5921071171760559,
+ "learning_rate": 0.00013709103482560078,
+ "loss": 0.98,
+ "step": 1136
+ },
+ {
+ "epoch": 1.1373198712137789,
+ "grad_norm": 0.4483082890510559,
+ "learning_rate": 0.00013699366716140435,
+ "loss": 0.9203,
+ "step": 1137
+ },
+ {
+ "epoch": 1.1383201525429028,
+ "grad_norm": 0.4304388165473938,
+ "learning_rate": 0.00013689625884785798,
+ "loss": 0.824,
+ "step": 1138
+ },
+ {
+ "epoch": 1.1393204338720264,
+ "grad_norm": 0.5273844003677368,
+ "learning_rate": 0.00013679880999199583,
+ "loss": 1.0061,
+ "step": 1139
+ },
+ {
+ "epoch": 1.1403207152011503,
+ "grad_norm": 0.5016499161720276,
+ "learning_rate": 0.00013670132070089653,
+ "loss": 0.8692,
+ "step": 1140
+ },
+ {
+ "epoch": 1.1413209965302742,
+ "grad_norm": 0.5045731067657471,
+ "learning_rate": 0.00013660379108168324,
+ "loss": 0.958,
+ "step": 1141
+ },
+ {
+ "epoch": 1.142321277859398,
+ "grad_norm": 0.484275221824646,
+ "learning_rate": 0.00013650622124152334,
+ "loss": 0.8589,
+ "step": 1142
+ },
+ {
+ "epoch": 1.1433215591885217,
+ "grad_norm": 0.6210789680480957,
+ "learning_rate": 0.0001364086112876284,
+ "loss": 0.931,
+ "step": 1143
+ },
+ {
+ "epoch": 1.1443218405176456,
+ "grad_norm": 0.59291011095047,
+ "learning_rate": 0.00013631096132725413,
+ "loss": 0.9706,
+ "step": 1144
+ },
+ {
+ "epoch": 1.1453221218467693,
+ "grad_norm": 0.48909759521484375,
+ "learning_rate": 0.00013621327146770025,
+ "loss": 0.9696,
+ "step": 1145
+ },
+ {
+ "epoch": 1.1463224031758932,
+ "grad_norm": 0.5022495985031128,
+ "learning_rate": 0.00013611554181631013,
+ "loss": 0.9349,
+ "step": 1146
+ },
+ {
+ "epoch": 1.147322684505017,
+ "grad_norm": 0.6155623197555542,
+ "learning_rate": 0.00013601777248047105,
+ "loss": 0.9161,
+ "step": 1147
+ },
+ {
+ "epoch": 1.148322965834141,
+ "grad_norm": 0.49372079968452454,
+ "learning_rate": 0.0001359199635676138,
+ "loss": 0.8598,
+ "step": 1148
+ },
+ {
+ "epoch": 1.1493232471632646,
+ "grad_norm": 0.504294753074646,
+ "learning_rate": 0.00013582211518521273,
+ "loss": 0.9334,
+ "step": 1149
+ },
+ {
+ "epoch": 1.1503235284923885,
+ "grad_norm": 0.44594088196754456,
+ "learning_rate": 0.00013572422744078551,
+ "loss": 1.0443,
+ "step": 1150
+ },
+ {
+ "epoch": 1.1513238098215124,
+ "grad_norm": 0.4689579904079437,
+ "learning_rate": 0.00013562630044189304,
+ "loss": 0.9192,
+ "step": 1151
+ },
+ {
+ "epoch": 1.152324091150636,
+ "grad_norm": 0.49370667338371277,
+ "learning_rate": 0.00013552833429613938,
+ "loss": 0.8638,
+ "step": 1152
+ },
+ {
+ "epoch": 1.15332437247976,
+ "grad_norm": 0.4459637403488159,
+ "learning_rate": 0.0001354303291111716,
+ "loss": 0.8719,
+ "step": 1153
+ },
+ {
+ "epoch": 1.1543246538088838,
+ "grad_norm": 0.41995370388031006,
+ "learning_rate": 0.0001353322849946797,
+ "loss": 0.9429,
+ "step": 1154
+ },
+ {
+ "epoch": 1.1553249351380075,
+ "grad_norm": 0.5358927249908447,
+ "learning_rate": 0.00013523420205439646,
+ "loss": 1.0724,
+ "step": 1155
+ },
+ {
+ "epoch": 1.1563252164671314,
+ "grad_norm": 0.48797738552093506,
+ "learning_rate": 0.0001351360803980972,
+ "loss": 1.0191,
+ "step": 1156
+ },
+ {
+ "epoch": 1.1573254977962553,
+ "grad_norm": 0.46079760789871216,
+ "learning_rate": 0.00013503792013359997,
+ "loss": 0.8731,
+ "step": 1157
+ },
+ {
+ "epoch": 1.158325779125379,
+ "grad_norm": 0.5278632044792175,
+ "learning_rate": 0.00013493972136876509,
+ "loss": 1.0689,
+ "step": 1158
+ },
+ {
+ "epoch": 1.1593260604545028,
+ "grad_norm": 0.6085927486419678,
+ "learning_rate": 0.00013484148421149527,
+ "loss": 1.0228,
+ "step": 1159
+ },
+ {
+ "epoch": 1.1603263417836267,
+ "grad_norm": 0.49424564838409424,
+ "learning_rate": 0.0001347432087697354,
+ "loss": 0.9622,
+ "step": 1160
+ },
+ {
+ "epoch": 1.1613266231127504,
+ "grad_norm": 0.4577535092830658,
+ "learning_rate": 0.00013464489515147238,
+ "loss": 0.795,
+ "step": 1161
+ },
+ {
+ "epoch": 1.1623269044418743,
+ "grad_norm": 0.5331981778144836,
+ "learning_rate": 0.0001345465434647351,
+ "loss": 1.2866,
+ "step": 1162
+ },
+ {
+ "epoch": 1.1633271857709981,
+ "grad_norm": 0.4657655954360962,
+ "learning_rate": 0.00013444815381759425,
+ "loss": 0.8171,
+ "step": 1163
+ },
+ {
+ "epoch": 1.1643274671001218,
+ "grad_norm": 0.44027647376060486,
+ "learning_rate": 0.00013434972631816235,
+ "loss": 0.9448,
+ "step": 1164
+ },
+ {
+ "epoch": 1.1653277484292457,
+ "grad_norm": 1.996617317199707,
+ "learning_rate": 0.0001342512610745933,
+ "loss": 0.8706,
+ "step": 1165
+ },
+ {
+ "epoch": 1.1663280297583696,
+ "grad_norm": 0.4826609790325165,
+ "learning_rate": 0.0001341527581950827,
+ "loss": 1.1075,
+ "step": 1166
+ },
+ {
+ "epoch": 1.1673283110874935,
+ "grad_norm": 0.4908469617366791,
+ "learning_rate": 0.00013405421778786737,
+ "loss": 0.835,
+ "step": 1167
+ },
+ {
+ "epoch": 1.1683285924166171,
+ "grad_norm": 0.5113404989242554,
+ "learning_rate": 0.00013395563996122537,
+ "loss": 0.8437,
+ "step": 1168
+ },
+ {
+ "epoch": 1.169328873745741,
+ "grad_norm": 0.5029433369636536,
+ "learning_rate": 0.00013385702482347593,
+ "loss": 1.1188,
+ "step": 1169
+ },
+ {
+ "epoch": 1.170329155074865,
+ "grad_norm": 0.4739987552165985,
+ "learning_rate": 0.00013375837248297926,
+ "loss": 0.9829,
+ "step": 1170
+ },
+ {
+ "epoch": 1.1713294364039886,
+ "grad_norm": 0.5853392481803894,
+ "learning_rate": 0.0001336596830481364,
+ "loss": 1.0384,
+ "step": 1171
+ },
+ {
+ "epoch": 1.1723297177331125,
+ "grad_norm": 0.5038638710975647,
+ "learning_rate": 0.0001335609566273892,
+ "loss": 0.9389,
+ "step": 1172
+ },
+ {
+ "epoch": 1.1733299990622363,
+ "grad_norm": 0.4367244243621826,
+ "learning_rate": 0.00013346219332922016,
+ "loss": 0.8182,
+ "step": 1173
+ },
+ {
+ "epoch": 1.17433028039136,
+ "grad_norm": 0.4453211724758148,
+ "learning_rate": 0.00013336339326215228,
+ "loss": 0.9289,
+ "step": 1174
+ },
+ {
+ "epoch": 1.175330561720484,
+ "grad_norm": 0.49941959977149963,
+ "learning_rate": 0.00013326455653474897,
+ "loss": 1.1277,
+ "step": 1175
+ },
+ {
+ "epoch": 1.1763308430496078,
+ "grad_norm": 0.553996205329895,
+ "learning_rate": 0.00013316568325561393,
+ "loss": 0.8582,
+ "step": 1176
+ },
+ {
+ "epoch": 1.1773311243787314,
+ "grad_norm": 0.5424408316612244,
+ "learning_rate": 0.00013306677353339098,
+ "loss": 1.0046,
+ "step": 1177
+ },
+ {
+ "epoch": 1.1783314057078553,
+ "grad_norm": 0.4373432695865631,
+ "learning_rate": 0.000132967827476764,
+ "loss": 0.9554,
+ "step": 1178
+ },
+ {
+ "epoch": 1.1793316870369792,
+ "grad_norm": 0.4744022786617279,
+ "learning_rate": 0.0001328688451944569,
+ "loss": 0.7784,
+ "step": 1179
+ },
+ {
+ "epoch": 1.1803319683661029,
+ "grad_norm": 0.5251059532165527,
+ "learning_rate": 0.00013276982679523322,
+ "loss": 0.8857,
+ "step": 1180
+ },
+ {
+ "epoch": 1.1813322496952268,
+ "grad_norm": 0.5108295679092407,
+ "learning_rate": 0.00013267077238789633,
+ "loss": 1.0711,
+ "step": 1181
+ },
+ {
+ "epoch": 1.1823325310243507,
+ "grad_norm": 0.49973955750465393,
+ "learning_rate": 0.00013257168208128908,
+ "loss": 1.0047,
+ "step": 1182
+ },
+ {
+ "epoch": 1.1833328123534743,
+ "grad_norm": 0.5143113732337952,
+ "learning_rate": 0.00013247255598429378,
+ "loss": 0.9294,
+ "step": 1183
+ },
+ {
+ "epoch": 1.1843330936825982,
+ "grad_norm": 0.5185163617134094,
+ "learning_rate": 0.00013237339420583212,
+ "loss": 0.9491,
+ "step": 1184
+ },
+ {
+ "epoch": 1.185333375011722,
+ "grad_norm": 0.49349021911621094,
+ "learning_rate": 0.00013227419685486492,
+ "loss": 0.812,
+ "step": 1185
+ },
+ {
+ "epoch": 1.186333656340846,
+ "grad_norm": 0.5210988521575928,
+ "learning_rate": 0.00013217496404039218,
+ "loss": 1.1228,
+ "step": 1186
+ },
+ {
+ "epoch": 1.1873339376699696,
+ "grad_norm": 0.46139585971832275,
+ "learning_rate": 0.0001320756958714528,
+ "loss": 0.7623,
+ "step": 1187
+ },
+ {
+ "epoch": 1.1883342189990935,
+ "grad_norm": 0.5365749597549438,
+ "learning_rate": 0.00013197639245712454,
+ "loss": 1.0785,
+ "step": 1188
+ },
+ {
+ "epoch": 1.1893345003282172,
+ "grad_norm": 0.4624418616294861,
+ "learning_rate": 0.00013187705390652388,
+ "loss": 1.0245,
+ "step": 1189
+ },
+ {
+ "epoch": 1.190334781657341,
+ "grad_norm": 0.4919735789299011,
+ "learning_rate": 0.00013177768032880593,
+ "loss": 0.9078,
+ "step": 1190
+ },
+ {
+ "epoch": 1.191335062986465,
+ "grad_norm": 0.5049088597297668,
+ "learning_rate": 0.0001316782718331643,
+ "loss": 0.8884,
+ "step": 1191
+ },
+ {
+ "epoch": 1.1923353443155889,
+ "grad_norm": 0.47496137022972107,
+ "learning_rate": 0.0001315788285288309,
+ "loss": 0.9414,
+ "step": 1192
+ },
+ {
+ "epoch": 1.1933356256447125,
+ "grad_norm": 0.4913059175014496,
+ "learning_rate": 0.00013147935052507597,
+ "loss": 0.8762,
+ "step": 1193
+ },
+ {
+ "epoch": 1.1943359069738364,
+ "grad_norm": 0.5643580555915833,
+ "learning_rate": 0.00013137983793120786,
+ "loss": 0.9556,
+ "step": 1194
+ },
+ {
+ "epoch": 1.1953361883029603,
+ "grad_norm": 0.5032216310501099,
+ "learning_rate": 0.0001312802908565729,
+ "loss": 1.1547,
+ "step": 1195
+ },
+ {
+ "epoch": 1.196336469632084,
+ "grad_norm": 0.5721387267112732,
+ "learning_rate": 0.0001311807094105553,
+ "loss": 0.97,
+ "step": 1196
+ },
+ {
+ "epoch": 1.1973367509612078,
+ "grad_norm": 0.47524675726890564,
+ "learning_rate": 0.00013108109370257712,
+ "loss": 0.9953,
+ "step": 1197
+ },
+ {
+ "epoch": 1.1983370322903317,
+ "grad_norm": 0.5769131183624268,
+ "learning_rate": 0.00013098144384209796,
+ "loss": 1.0578,
+ "step": 1198
+ },
+ {
+ "epoch": 1.1993373136194554,
+ "grad_norm": 0.4861721694469452,
+ "learning_rate": 0.000130881759938615,
+ "loss": 0.7542,
+ "step": 1199
+ },
+ {
+ "epoch": 1.2003375949485793,
+ "grad_norm": 0.4798511266708374,
+ "learning_rate": 0.00013078204210166278,
+ "loss": 0.9024,
+ "step": 1200
+ },
+ {
+ "epoch": 1.2013378762777032,
+ "grad_norm": 0.4447210729122162,
+ "learning_rate": 0.00013068229044081324,
+ "loss": 0.9703,
+ "step": 1201
+ },
+ {
+ "epoch": 1.2023381576068268,
+ "grad_norm": 0.5221365690231323,
+ "learning_rate": 0.0001305825050656754,
+ "loss": 1.0575,
+ "step": 1202
+ },
+ {
+ "epoch": 1.2033384389359507,
+ "grad_norm": 0.44786536693573,
+ "learning_rate": 0.00013048268608589533,
+ "loss": 0.9047,
+ "step": 1203
+ },
+ {
+ "epoch": 1.2043387202650746,
+ "grad_norm": 0.44534093141555786,
+ "learning_rate": 0.00013038283361115603,
+ "loss": 0.9156,
+ "step": 1204
+ },
+ {
+ "epoch": 1.2053390015941985,
+ "grad_norm": 0.5345563292503357,
+ "learning_rate": 0.0001302829477511773,
+ "loss": 0.9933,
+ "step": 1205
+ },
+ {
+ "epoch": 1.2063392829233222,
+ "grad_norm": 0.49175193905830383,
+ "learning_rate": 0.0001301830286157157,
+ "loss": 0.84,
+ "step": 1206
+ },
+ {
+ "epoch": 1.207339564252446,
+ "grad_norm": 0.5271350145339966,
+ "learning_rate": 0.0001300830763145642,
+ "loss": 0.8739,
+ "step": 1207
+ },
+ {
+ "epoch": 1.2083398455815697,
+ "grad_norm": 0.4891369342803955,
+ "learning_rate": 0.00012998309095755235,
+ "loss": 0.9923,
+ "step": 1208
+ },
+ {
+ "epoch": 1.2093401269106936,
+ "grad_norm": 0.44362354278564453,
+ "learning_rate": 0.00012988307265454597,
+ "loss": 0.911,
+ "step": 1209
+ },
+ {
+ "epoch": 1.2103404082398175,
+ "grad_norm": 0.46026211977005005,
+ "learning_rate": 0.0001297830215154471,
+ "loss": 0.8749,
+ "step": 1210
+ },
+ {
+ "epoch": 1.2113406895689414,
+ "grad_norm": 0.49236229062080383,
+ "learning_rate": 0.00012968293765019384,
+ "loss": 0.8959,
+ "step": 1211
+ },
+ {
+ "epoch": 1.212340970898065,
+ "grad_norm": 0.5326531529426575,
+ "learning_rate": 0.00012958282116876026,
+ "loss": 1.0464,
+ "step": 1212
+ },
+ {
+ "epoch": 1.213341252227189,
+ "grad_norm": 0.4658203721046448,
+ "learning_rate": 0.00012948267218115624,
+ "loss": 0.8895,
+ "step": 1213
+ },
+ {
+ "epoch": 1.2143415335563128,
+ "grad_norm": 0.5042040348052979,
+ "learning_rate": 0.00012938249079742743,
+ "loss": 0.889,
+ "step": 1214
+ },
+ {
+ "epoch": 1.2153418148854365,
+ "grad_norm": 0.5408799648284912,
+ "learning_rate": 0.00012928227712765504,
+ "loss": 0.9974,
+ "step": 1215
+ },
+ {
+ "epoch": 1.2163420962145604,
+ "grad_norm": 0.7056695818901062,
+ "learning_rate": 0.0001291820312819558,
+ "loss": 0.8644,
+ "step": 1216
+ },
+ {
+ "epoch": 1.2173423775436842,
+ "grad_norm": 0.5424172878265381,
+ "learning_rate": 0.00012908175337048174,
+ "loss": 1.0855,
+ "step": 1217
+ },
+ {
+ "epoch": 1.218342658872808,
+ "grad_norm": 0.4773527681827545,
+ "learning_rate": 0.00012898144350342015,
+ "loss": 1.014,
+ "step": 1218
+ },
+ {
+ "epoch": 1.2193429402019318,
+ "grad_norm": 0.5538880228996277,
+ "learning_rate": 0.0001288811017909934,
+ "loss": 1.0491,
+ "step": 1219
+ },
+ {
+ "epoch": 1.2203432215310557,
+ "grad_norm": 0.4497896730899811,
+ "learning_rate": 0.00012878072834345895,
+ "loss": 0.8591,
+ "step": 1220
+ },
+ {
+ "epoch": 1.2213435028601793,
+ "grad_norm": 0.5487242341041565,
+ "learning_rate": 0.00012868032327110904,
+ "loss": 0.9809,
+ "step": 1221
+ },
+ {
+ "epoch": 1.2223437841893032,
+ "grad_norm": 0.5900948643684387,
+ "learning_rate": 0.00012857988668427066,
+ "loss": 1.1435,
+ "step": 1222
+ },
+ {
+ "epoch": 1.2233440655184271,
+ "grad_norm": 0.5471523404121399,
+ "learning_rate": 0.0001284794186933055,
+ "loss": 1.0088,
+ "step": 1223
+ },
+ {
+ "epoch": 1.2243443468475508,
+ "grad_norm": 0.4625445604324341,
+ "learning_rate": 0.00012837891940860972,
+ "loss": 1.0452,
+ "step": 1224
+ },
+ {
+ "epoch": 1.2253446281766747,
+ "grad_norm": 0.4972693920135498,
+ "learning_rate": 0.00012827838894061377,
+ "loss": 1.0403,
+ "step": 1225
+ },
+ {
+ "epoch": 1.2263449095057986,
+ "grad_norm": 0.4823111295700073,
+ "learning_rate": 0.00012817782739978255,
+ "loss": 0.9439,
+ "step": 1226
+ },
+ {
+ "epoch": 1.2273451908349222,
+ "grad_norm": 0.5163894295692444,
+ "learning_rate": 0.00012807723489661495,
+ "loss": 1.031,
+ "step": 1227
+ },
+ {
+ "epoch": 1.228345472164046,
+ "grad_norm": 0.5085253119468689,
+ "learning_rate": 0.00012797661154164395,
+ "loss": 0.998,
+ "step": 1228
+ },
+ {
+ "epoch": 1.22934575349317,
+ "grad_norm": 0.4469011425971985,
+ "learning_rate": 0.00012787595744543647,
+ "loss": 0.8943,
+ "step": 1229
+ },
+ {
+ "epoch": 1.2303460348222939,
+ "grad_norm": 0.5117391347885132,
+ "learning_rate": 0.00012777527271859307,
+ "loss": 0.9817,
+ "step": 1230
+ },
+ {
+ "epoch": 1.2313463161514175,
+ "grad_norm": 0.44259950518608093,
+ "learning_rate": 0.0001276745574717481,
+ "loss": 0.7659,
+ "step": 1231
+ },
+ {
+ "epoch": 1.2323465974805414,
+ "grad_norm": 0.42978596687316895,
+ "learning_rate": 0.00012757381181556943,
+ "loss": 0.7313,
+ "step": 1232
+ },
+ {
+ "epoch": 1.2333468788096653,
+ "grad_norm": 0.5619105696678162,
+ "learning_rate": 0.0001274730358607583,
+ "loss": 0.9881,
+ "step": 1233
+ },
+ {
+ "epoch": 1.234347160138789,
+ "grad_norm": 0.5065141916275024,
+ "learning_rate": 0.00012737222971804924,
+ "loss": 0.9789,
+ "step": 1234
+ },
+ {
+ "epoch": 1.2353474414679129,
+ "grad_norm": 0.514705240726471,
+ "learning_rate": 0.00012727139349821,
+ "loss": 0.9278,
+ "step": 1235
+ },
+ {
+ "epoch": 1.2363477227970368,
+ "grad_norm": 0.48272448778152466,
+ "learning_rate": 0.0001271705273120413,
+ "loss": 0.9011,
+ "step": 1236
+ },
+ {
+ "epoch": 1.2373480041261604,
+ "grad_norm": 0.4993284344673157,
+ "learning_rate": 0.00012706963127037685,
+ "loss": 0.8341,
+ "step": 1237
+ },
+ {
+ "epoch": 1.2383482854552843,
+ "grad_norm": 0.44701850414276123,
+ "learning_rate": 0.00012696870548408316,
+ "loss": 0.8481,
+ "step": 1238
+ },
+ {
+ "epoch": 1.2393485667844082,
+ "grad_norm": 0.5611200332641602,
+ "learning_rate": 0.00012686775006405946,
+ "loss": 1.101,
+ "step": 1239
+ },
+ {
+ "epoch": 1.2403488481135319,
+ "grad_norm": 0.4962129592895508,
+ "learning_rate": 0.00012676676512123747,
+ "loss": 0.951,
+ "step": 1240
+ },
+ {
+ "epoch": 1.2413491294426557,
+ "grad_norm": 0.5547065734863281,
+ "learning_rate": 0.00012666575076658134,
+ "loss": 1.0228,
+ "step": 1241
+ },
+ {
+ "epoch": 1.2423494107717796,
+ "grad_norm": 0.5761319398880005,
+ "learning_rate": 0.00012656470711108764,
+ "loss": 1.0631,
+ "step": 1242
+ },
+ {
+ "epoch": 1.2433496921009033,
+ "grad_norm": 0.5202417969703674,
+ "learning_rate": 0.00012646363426578505,
+ "loss": 0.9623,
+ "step": 1243
+ },
+ {
+ "epoch": 1.2443499734300272,
+ "grad_norm": 0.561244547367096,
+ "learning_rate": 0.0001263625323417343,
+ "loss": 1.1666,
+ "step": 1244
+ },
+ {
+ "epoch": 1.245350254759151,
+ "grad_norm": 0.43389594554901123,
+ "learning_rate": 0.0001262614014500282,
+ "loss": 0.9473,
+ "step": 1245
+ },
+ {
+ "epoch": 1.2463505360882747,
+ "grad_norm": 0.5219054222106934,
+ "learning_rate": 0.00012616024170179126,
+ "loss": 1.0181,
+ "step": 1246
+ },
+ {
+ "epoch": 1.2473508174173986,
+ "grad_norm": 0.5179515480995178,
+ "learning_rate": 0.00012605905320817976,
+ "loss": 1.0851,
+ "step": 1247
+ },
+ {
+ "epoch": 1.2483510987465225,
+ "grad_norm": 0.5104801058769226,
+ "learning_rate": 0.00012595783608038155,
+ "loss": 0.9239,
+ "step": 1248
+ },
+ {
+ "epoch": 1.2493513800756464,
+ "grad_norm": 0.46918627619743347,
+ "learning_rate": 0.00012585659042961596,
+ "loss": 0.8361,
+ "step": 1249
+ },
+ {
+ "epoch": 1.25035166140477,
+ "grad_norm": 0.5275365710258484,
+ "learning_rate": 0.00012575531636713368,
+ "loss": 0.9256,
+ "step": 1250
+ },
+ {
+ "epoch": 1.251351942733894,
+ "grad_norm": 0.5006279349327087,
+ "learning_rate": 0.00012565401400421651,
+ "loss": 0.8748,
+ "step": 1251
+ },
+ {
+ "epoch": 1.2523522240630176,
+ "grad_norm": 0.466467022895813,
+ "learning_rate": 0.0001255526834521775,
+ "loss": 0.9217,
+ "step": 1252
+ },
+ {
+ "epoch": 1.2533525053921415,
+ "grad_norm": 0.45304587483406067,
+ "learning_rate": 0.00012545132482236055,
+ "loss": 0.8776,
+ "step": 1253
+ },
+ {
+ "epoch": 1.2543527867212654,
+ "grad_norm": 0.483394980430603,
+ "learning_rate": 0.0001253499382261405,
+ "loss": 0.9421,
+ "step": 1254
+ },
+ {
+ "epoch": 1.2553530680503893,
+ "grad_norm": 0.5117647051811218,
+ "learning_rate": 0.00012524852377492285,
+ "loss": 1.0033,
+ "step": 1255
+ },
+ {
+ "epoch": 1.256353349379513,
+ "grad_norm": 0.5712929964065552,
+ "learning_rate": 0.00012514708158014378,
+ "loss": 1.0216,
+ "step": 1256
+ },
+ {
+ "epoch": 1.2573536307086368,
+ "grad_norm": 0.49368858337402344,
+ "learning_rate": 0.00012504561175326985,
+ "loss": 0.8836,
+ "step": 1257
+ },
+ {
+ "epoch": 1.2583539120377607,
+ "grad_norm": 0.5303272008895874,
+ "learning_rate": 0.00012494411440579814,
+ "loss": 1.0138,
+ "step": 1258
+ },
+ {
+ "epoch": 1.2593541933668844,
+ "grad_norm": 0.47034743428230286,
+ "learning_rate": 0.0001248425896492558,
+ "loss": 0.9346,
+ "step": 1259
+ },
+ {
+ "epoch": 1.2603544746960083,
+ "grad_norm": 0.5398191809654236,
+ "learning_rate": 0.00012474103759520027,
+ "loss": 1.2548,
+ "step": 1260
+ },
+ {
+ "epoch": 1.2613547560251321,
+ "grad_norm": 0.4403116703033447,
+ "learning_rate": 0.00012463945835521878,
+ "loss": 0.8063,
+ "step": 1261
+ },
+ {
+ "epoch": 1.2623550373542558,
+ "grad_norm": 0.5504721999168396,
+ "learning_rate": 0.0001245378520409286,
+ "loss": 1.0888,
+ "step": 1262
+ },
+ {
+ "epoch": 1.2633553186833797,
+ "grad_norm": 0.46984589099884033,
+ "learning_rate": 0.0001244362187639767,
+ "loss": 0.9062,
+ "step": 1263
+ },
+ {
+ "epoch": 1.2643556000125036,
+ "grad_norm": 0.5573250651359558,
+ "learning_rate": 0.00012433455863603967,
+ "loss": 0.9474,
+ "step": 1264
+ },
+ {
+ "epoch": 1.2653558813416272,
+ "grad_norm": 0.5468732714653015,
+ "learning_rate": 0.00012423287176882358,
+ "loss": 0.9424,
+ "step": 1265
+ },
+ {
+ "epoch": 1.2663561626707511,
+ "grad_norm": 0.4921899437904358,
+ "learning_rate": 0.00012413115827406392,
+ "loss": 0.8568,
+ "step": 1266
+ },
+ {
+ "epoch": 1.267356443999875,
+ "grad_norm": 0.48769402503967285,
+ "learning_rate": 0.00012402941826352546,
+ "loss": 0.7579,
+ "step": 1267
+ },
+ {
+ "epoch": 1.268356725328999,
+ "grad_norm": 0.5462141633033752,
+ "learning_rate": 0.00012392765184900202,
+ "loss": 0.9946,
+ "step": 1268
+ },
+ {
+ "epoch": 1.2693570066581226,
+ "grad_norm": 0.5021050572395325,
+ "learning_rate": 0.0001238258591423165,
+ "loss": 0.8603,
+ "step": 1269
+ },
+ {
+ "epoch": 1.2703572879872465,
+ "grad_norm": 0.5272159576416016,
+ "learning_rate": 0.00012372404025532072,
+ "loss": 0.94,
+ "step": 1270
+ },
+ {
+ "epoch": 1.2713575693163701,
+ "grad_norm": 0.5332500338554382,
+ "learning_rate": 0.00012362219529989514,
+ "loss": 1.1609,
+ "step": 1271
+ },
+ {
+ "epoch": 1.272357850645494,
+ "grad_norm": 0.5058136582374573,
+ "learning_rate": 0.00012352032438794902,
+ "loss": 1.0013,
+ "step": 1272
+ },
+ {
+ "epoch": 1.273358131974618,
+ "grad_norm": 0.5055596828460693,
+ "learning_rate": 0.00012341842763142005,
+ "loss": 1.0121,
+ "step": 1273
+ },
+ {
+ "epoch": 1.2743584133037418,
+ "grad_norm": 0.5699402689933777,
+ "learning_rate": 0.00012331650514227425,
+ "loss": 1.1188,
+ "step": 1274
+ },
+ {
+ "epoch": 1.2753586946328654,
+ "grad_norm": 0.511233925819397,
+ "learning_rate": 0.00012321455703250616,
+ "loss": 1.0291,
+ "step": 1275
+ },
+ {
+ "epoch": 1.2763589759619893,
+ "grad_norm": 0.5304299592971802,
+ "learning_rate": 0.00012311258341413822,
+ "loss": 0.9619,
+ "step": 1276
+ },
+ {
+ "epoch": 1.277359257291113,
+ "grad_norm": 0.5318915247917175,
+ "learning_rate": 0.00012301058439922102,
+ "loss": 0.9669,
+ "step": 1277
+ },
+ {
+ "epoch": 1.2783595386202369,
+ "grad_norm": 0.510267436504364,
+ "learning_rate": 0.000122908560099833,
+ "loss": 1.0956,
+ "step": 1278
+ },
+ {
+ "epoch": 1.2793598199493608,
+ "grad_norm": 0.530360758304596,
+ "learning_rate": 0.00012280651062808047,
+ "loss": 1.02,
+ "step": 1279
+ },
+ {
+ "epoch": 1.2803601012784847,
+ "grad_norm": 0.5094459056854248,
+ "learning_rate": 0.00012270443609609729,
+ "loss": 0.9614,
+ "step": 1280
+ },
+ {
+ "epoch": 1.2813603826076083,
+ "grad_norm": 0.4430864453315735,
+ "learning_rate": 0.0001226023366160449,
+ "loss": 0.8188,
+ "step": 1281
+ },
+ {
+ "epoch": 1.2823606639367322,
+ "grad_norm": 0.4705411493778229,
+ "learning_rate": 0.00012250021230011225,
+ "loss": 0.8952,
+ "step": 1282
+ },
+ {
+ "epoch": 1.283360945265856,
+ "grad_norm": 0.5231715440750122,
+ "learning_rate": 0.00012239806326051539,
+ "loss": 0.941,
+ "step": 1283
+ },
+ {
+ "epoch": 1.2843612265949798,
+ "grad_norm": 0.5658493041992188,
+ "learning_rate": 0.00012229588960949771,
+ "loss": 1.0047,
+ "step": 1284
+ },
+ {
+ "epoch": 1.2853615079241036,
+ "grad_norm": 0.6016567349433899,
+ "learning_rate": 0.00012219369145932959,
+ "loss": 1.1764,
+ "step": 1285
+ },
+ {
+ "epoch": 1.2863617892532275,
+ "grad_norm": 0.6365408301353455,
+ "learning_rate": 0.00012209146892230822,
+ "loss": 0.9777,
+ "step": 1286
+ },
+ {
+ "epoch": 1.2873620705823514,
+ "grad_norm": 0.46536219120025635,
+ "learning_rate": 0.00012198922211075778,
+ "loss": 0.9826,
+ "step": 1287
+ },
+ {
+ "epoch": 1.288362351911475,
+ "grad_norm": 0.5130245089530945,
+ "learning_rate": 0.00012188695113702896,
+ "loss": 1.0255,
+ "step": 1288
+ },
+ {
+ "epoch": 1.289362633240599,
+ "grad_norm": 0.5321043133735657,
+ "learning_rate": 0.00012178465611349911,
+ "loss": 0.9973,
+ "step": 1289
+ },
+ {
+ "epoch": 1.2903629145697226,
+ "grad_norm": 0.48580724000930786,
+ "learning_rate": 0.00012168233715257194,
+ "loss": 0.8768,
+ "step": 1290
+ },
+ {
+ "epoch": 1.2913631958988465,
+ "grad_norm": 0.5140405297279358,
+ "learning_rate": 0.00012157999436667747,
+ "loss": 0.8985,
+ "step": 1291
+ },
+ {
+ "epoch": 1.2923634772279704,
+ "grad_norm": 0.4582030773162842,
+ "learning_rate": 0.00012147762786827193,
+ "loss": 0.9693,
+ "step": 1292
+ },
+ {
+ "epoch": 1.2933637585570943,
+ "grad_norm": 0.47397539019584656,
+ "learning_rate": 0.00012137523776983757,
+ "loss": 0.8348,
+ "step": 1293
+ },
+ {
+ "epoch": 1.294364039886218,
+ "grad_norm": 0.43932002782821655,
+ "learning_rate": 0.00012127282418388264,
+ "loss": 0.851,
+ "step": 1294
+ },
+ {
+ "epoch": 1.2953643212153418,
+ "grad_norm": 0.5559205412864685,
+ "learning_rate": 0.0001211703872229411,
+ "loss": 0.86,
+ "step": 1295
+ },
+ {
+ "epoch": 1.2963646025444655,
+ "grad_norm": 0.5433980226516724,
+ "learning_rate": 0.00012106792699957263,
+ "loss": 1.1181,
+ "step": 1296
+ },
+ {
+ "epoch": 1.2973648838735894,
+ "grad_norm": 0.5069502592086792,
+ "learning_rate": 0.00012096544362636255,
+ "loss": 0.9613,
+ "step": 1297
+ },
+ {
+ "epoch": 1.2983651652027133,
+ "grad_norm": 0.5588079690933228,
+ "learning_rate": 0.00012086293721592152,
+ "loss": 1.0741,
+ "step": 1298
+ },
+ {
+ "epoch": 1.2993654465318372,
+ "grad_norm": 0.6035181879997253,
+ "learning_rate": 0.00012076040788088554,
+ "loss": 1.0187,
+ "step": 1299
+ },
+ {
+ "epoch": 1.3003657278609608,
+ "grad_norm": 0.4385228455066681,
+ "learning_rate": 0.00012065785573391581,
+ "loss": 0.9293,
+ "step": 1300
+ },
+ {
+ "epoch": 1.3013660091900847,
+ "grad_norm": 0.5284578800201416,
+ "learning_rate": 0.00012055528088769861,
+ "loss": 0.9479,
+ "step": 1301
+ },
+ {
+ "epoch": 1.3023662905192086,
+ "grad_norm": 0.46655789017677307,
+ "learning_rate": 0.00012045268345494511,
+ "loss": 0.8702,
+ "step": 1302
+ },
+ {
+ "epoch": 1.3033665718483323,
+ "grad_norm": 0.5073155164718628,
+ "learning_rate": 0.00012035006354839133,
+ "loss": 0.8667,
+ "step": 1303
+ },
+ {
+ "epoch": 1.3043668531774562,
+ "grad_norm": 0.5954610109329224,
+ "learning_rate": 0.00012024742128079805,
+ "loss": 1.0998,
+ "step": 1304
+ },
+ {
+ "epoch": 1.30536713450658,
+ "grad_norm": 0.46617114543914795,
+ "learning_rate": 0.00012014475676495052,
+ "loss": 0.8853,
+ "step": 1305
+ },
+ {
+ "epoch": 1.306367415835704,
+ "grad_norm": 0.5705167055130005,
+ "learning_rate": 0.00012004207011365849,
+ "loss": 0.9094,
+ "step": 1306
+ },
+ {
+ "epoch": 1.3073676971648276,
+ "grad_norm": 0.4711546301841736,
+ "learning_rate": 0.00011993936143975599,
+ "loss": 0.9597,
+ "step": 1307
+ },
+ {
+ "epoch": 1.3083679784939515,
+ "grad_norm": 0.5322745442390442,
+ "learning_rate": 0.00011983663085610131,
+ "loss": 0.9221,
+ "step": 1308
+ },
+ {
+ "epoch": 1.3093682598230751,
+ "grad_norm": 0.4769452214241028,
+ "learning_rate": 0.00011973387847557676,
+ "loss": 0.7874,
+ "step": 1309
+ },
+ {
+ "epoch": 1.310368541152199,
+ "grad_norm": 0.5224636793136597,
+ "learning_rate": 0.00011963110441108863,
+ "loss": 0.8233,
+ "step": 1310
+ },
+ {
+ "epoch": 1.311368822481323,
+ "grad_norm": 0.5125696063041687,
+ "learning_rate": 0.000119528308775567,
+ "loss": 0.9894,
+ "step": 1311
+ },
+ {
+ "epoch": 1.3123691038104468,
+ "grad_norm": 0.5573001503944397,
+ "learning_rate": 0.00011942549168196575,
+ "loss": 0.9043,
+ "step": 1312
+ },
+ {
+ "epoch": 1.3133693851395705,
+ "grad_norm": 0.5493408441543579,
+ "learning_rate": 0.00011932265324326221,
+ "loss": 0.964,
+ "step": 1313
+ },
+ {
+ "epoch": 1.3143696664686944,
+ "grad_norm": 0.5327842235565186,
+ "learning_rate": 0.0001192197935724573,
+ "loss": 0.9196,
+ "step": 1314
+ },
+ {
+ "epoch": 1.315369947797818,
+ "grad_norm": 0.5743328332901001,
+ "learning_rate": 0.00011911691278257511,
+ "loss": 1.0504,
+ "step": 1315
+ },
+ {
+ "epoch": 1.316370229126942,
+ "grad_norm": 0.446932315826416,
+ "learning_rate": 0.0001190140109866631,
+ "loss": 0.8425,
+ "step": 1316
+ },
+ {
+ "epoch": 1.3173705104560658,
+ "grad_norm": 0.47306087613105774,
+ "learning_rate": 0.00011891108829779165,
+ "loss": 0.8726,
+ "step": 1317
+ },
+ {
+ "epoch": 1.3183707917851897,
+ "grad_norm": 0.566939115524292,
+ "learning_rate": 0.00011880814482905422,
+ "loss": 0.8747,
+ "step": 1318
+ },
+ {
+ "epoch": 1.3193710731143133,
+ "grad_norm": 0.5145870447158813,
+ "learning_rate": 0.00011870518069356709,
+ "loss": 0.9383,
+ "step": 1319
+ },
+ {
+ "epoch": 1.3203713544434372,
+ "grad_norm": 0.5228437185287476,
+ "learning_rate": 0.0001186021960044692,
+ "loss": 1.103,
+ "step": 1320
+ },
+ {
+ "epoch": 1.3213716357725611,
+ "grad_norm": 0.4844512939453125,
+ "learning_rate": 0.00011849919087492211,
+ "loss": 0.98,
+ "step": 1321
+ },
+ {
+ "epoch": 1.3223719171016848,
+ "grad_norm": 0.5099167227745056,
+ "learning_rate": 0.00011839616541810983,
+ "loss": 0.9023,
+ "step": 1322
+ },
+ {
+ "epoch": 1.3233721984308087,
+ "grad_norm": 0.4702555537223816,
+ "learning_rate": 0.00011829311974723867,
+ "loss": 0.8553,
+ "step": 1323
+ },
+ {
+ "epoch": 1.3243724797599326,
+ "grad_norm": 0.5219053030014038,
+ "learning_rate": 0.00011819005397553723,
+ "loss": 0.9446,
+ "step": 1324
+ },
+ {
+ "epoch": 1.3253727610890562,
+ "grad_norm": 0.48462843894958496,
+ "learning_rate": 0.00011808696821625613,
+ "loss": 0.9591,
+ "step": 1325
+ },
+ {
+ "epoch": 1.32637304241818,
+ "grad_norm": 0.5187227725982666,
+ "learning_rate": 0.000117983862582668,
+ "loss": 0.9413,
+ "step": 1326
+ },
+ {
+ "epoch": 1.327373323747304,
+ "grad_norm": 0.47444605827331543,
+ "learning_rate": 0.00011788073718806725,
+ "loss": 0.8979,
+ "step": 1327
+ },
+ {
+ "epoch": 1.3283736050764277,
+ "grad_norm": 0.5251137018203735,
+ "learning_rate": 0.00011777759214577006,
+ "loss": 1.0449,
+ "step": 1328
+ },
+ {
+ "epoch": 1.3293738864055515,
+ "grad_norm": 0.5007866024971008,
+ "learning_rate": 0.00011767442756911417,
+ "loss": 0.9907,
+ "step": 1329
+ },
+ {
+ "epoch": 1.3303741677346754,
+ "grad_norm": 0.8486194610595703,
+ "learning_rate": 0.00011757124357145881,
+ "loss": 1.0459,
+ "step": 1330
+ },
+ {
+ "epoch": 1.3313744490637993,
+ "grad_norm": 0.5153964161872864,
+ "learning_rate": 0.00011746804026618452,
+ "loss": 0.9911,
+ "step": 1331
+ },
+ {
+ "epoch": 1.332374730392923,
+ "grad_norm": 0.523077666759491,
+ "learning_rate": 0.00011736481776669306,
+ "loss": 1.0571,
+ "step": 1332
+ },
+ {
+ "epoch": 1.3333750117220469,
+ "grad_norm": 0.5242265462875366,
+ "learning_rate": 0.00011726157618640728,
+ "loss": 0.9057,
+ "step": 1333
+ },
+ {
+ "epoch": 1.3343752930511705,
+ "grad_norm": 0.524046778678894,
+ "learning_rate": 0.00011715831563877104,
+ "loss": 1.0413,
+ "step": 1334
+ },
+ {
+ "epoch": 1.3353755743802944,
+ "grad_norm": 0.5873232483863831,
+ "learning_rate": 0.00011705503623724898,
+ "loss": 1.1105,
+ "step": 1335
+ },
+ {
+ "epoch": 1.3363758557094183,
+ "grad_norm": 0.5559434294700623,
+ "learning_rate": 0.00011695173809532652,
+ "loss": 0.9045,
+ "step": 1336
+ },
+ {
+ "epoch": 1.3373761370385422,
+ "grad_norm": 0.5970155000686646,
+ "learning_rate": 0.00011684842132650957,
+ "loss": 1.1663,
+ "step": 1337
+ },
+ {
+ "epoch": 1.3383764183676659,
+ "grad_norm": 0.5005142092704773,
+ "learning_rate": 0.00011674508604432464,
+ "loss": 1.0695,
+ "step": 1338
+ },
+ {
+ "epoch": 1.3393766996967897,
+ "grad_norm": 0.49226582050323486,
+ "learning_rate": 0.00011664173236231848,
+ "loss": 1.0875,
+ "step": 1339
+ },
+ {
+ "epoch": 1.3403769810259134,
+ "grad_norm": 0.4792287349700928,
+ "learning_rate": 0.0001165383603940581,
+ "loss": 0.9102,
+ "step": 1340
+ },
+ {
+ "epoch": 1.3413772623550373,
+ "grad_norm": 0.4332147538661957,
+ "learning_rate": 0.00011643497025313061,
+ "loss": 0.8948,
+ "step": 1341
+ },
+ {
+ "epoch": 1.3423775436841612,
+ "grad_norm": 0.45502984523773193,
+ "learning_rate": 0.00011633156205314309,
+ "loss": 0.8538,
+ "step": 1342
+ },
+ {
+ "epoch": 1.343377825013285,
+ "grad_norm": 0.5594006776809692,
+ "learning_rate": 0.00011622813590772244,
+ "loss": 1.0178,
+ "step": 1343
+ },
+ {
+ "epoch": 1.3443781063424087,
+ "grad_norm": 0.4428876042366028,
+ "learning_rate": 0.00011612469193051525,
+ "loss": 0.856,
+ "step": 1344
+ },
+ {
+ "epoch": 1.3453783876715326,
+ "grad_norm": 0.4615425169467926,
+ "learning_rate": 0.00011602123023518779,
+ "loss": 0.8568,
+ "step": 1345
+ },
+ {
+ "epoch": 1.3463786690006565,
+ "grad_norm": 0.543389618396759,
+ "learning_rate": 0.00011591775093542572,
+ "loss": 0.8293,
+ "step": 1346
+ },
+ {
+ "epoch": 1.3473789503297802,
+ "grad_norm": 0.4740433394908905,
+ "learning_rate": 0.0001158142541449341,
+ "loss": 0.9163,
+ "step": 1347
+ },
+ {
+ "epoch": 1.348379231658904,
+ "grad_norm": 0.47938287258148193,
+ "learning_rate": 0.00011571073997743716,
+ "loss": 0.9745,
+ "step": 1348
+ },
+ {
+ "epoch": 1.349379512988028,
+ "grad_norm": 0.47510263323783875,
+ "learning_rate": 0.0001156072085466783,
+ "loss": 0.9536,
+ "step": 1349
+ },
+ {
+ "epoch": 1.3503797943171518,
+ "grad_norm": 0.5921860933303833,
+ "learning_rate": 0.00011550365996641979,
+ "loss": 0.8397,
+ "step": 1350
+ },
+ {
+ "epoch": 1.3513800756462755,
+ "grad_norm": 0.5436375737190247,
+ "learning_rate": 0.00011540009435044281,
+ "loss": 0.9381,
+ "step": 1351
+ },
+ {
+ "epoch": 1.3523803569753994,
+ "grad_norm": 0.4591434597969055,
+ "learning_rate": 0.00011529651181254723,
+ "loss": 1.0771,
+ "step": 1352
+ },
+ {
+ "epoch": 1.353380638304523,
+ "grad_norm": 0.533069372177124,
+ "learning_rate": 0.0001151929124665516,
+ "loss": 0.9103,
+ "step": 1353
+ },
+ {
+ "epoch": 1.354380919633647,
+ "grad_norm": 0.538324773311615,
+ "learning_rate": 0.00011508929642629274,
+ "loss": 1.0469,
+ "step": 1354
+ },
+ {
+ "epoch": 1.3553812009627708,
+ "grad_norm": 0.46198832988739014,
+ "learning_rate": 0.00011498566380562601,
+ "loss": 0.8242,
+ "step": 1355
+ },
+ {
+ "epoch": 1.3563814822918947,
+ "grad_norm": 0.573716402053833,
+ "learning_rate": 0.0001148820147184249,
+ "loss": 0.9437,
+ "step": 1356
+ },
+ {
+ "epoch": 1.3573817636210184,
+ "grad_norm": 0.5638802647590637,
+ "learning_rate": 0.00011477834927858104,
+ "loss": 0.9336,
+ "step": 1357
+ },
+ {
+ "epoch": 1.3583820449501423,
+ "grad_norm": 0.48780402541160583,
+ "learning_rate": 0.00011467466760000399,
+ "loss": 0.8859,
+ "step": 1358
+ },
+ {
+ "epoch": 1.359382326279266,
+ "grad_norm": 0.5441538095474243,
+ "learning_rate": 0.00011457096979662114,
+ "loss": 0.8804,
+ "step": 1359
+ },
+ {
+ "epoch": 1.3603826076083898,
+ "grad_norm": 0.5250831842422485,
+ "learning_rate": 0.00011446725598237767,
+ "loss": 0.9739,
+ "step": 1360
+ },
+ {
+ "epoch": 1.3613828889375137,
+ "grad_norm": 0.49177756905555725,
+ "learning_rate": 0.00011436352627123623,
+ "loss": 0.9586,
+ "step": 1361
+ },
+ {
+ "epoch": 1.3623831702666376,
+ "grad_norm": 0.5866628885269165,
+ "learning_rate": 0.00011425978077717709,
+ "loss": 1.0511,
+ "step": 1362
+ },
+ {
+ "epoch": 1.3633834515957612,
+ "grad_norm": 0.49350351095199585,
+ "learning_rate": 0.00011415601961419775,
+ "loss": 0.9637,
+ "step": 1363
+ },
+ {
+ "epoch": 1.3643837329248851,
+ "grad_norm": 0.5402287244796753,
+ "learning_rate": 0.00011405224289631295,
+ "loss": 1.0008,
+ "step": 1364
+ },
+ {
+ "epoch": 1.365384014254009,
+ "grad_norm": 0.5524907112121582,
+ "learning_rate": 0.00011394845073755455,
+ "loss": 1.0398,
+ "step": 1365
+ },
+ {
+ "epoch": 1.3663842955831327,
+ "grad_norm": 0.49948206543922424,
+ "learning_rate": 0.0001138446432519714,
+ "loss": 0.8577,
+ "step": 1366
+ },
+ {
+ "epoch": 1.3673845769122566,
+ "grad_norm": 0.500592052936554,
+ "learning_rate": 0.00011374082055362909,
+ "loss": 1.0053,
+ "step": 1367
+ },
+ {
+ "epoch": 1.3683848582413805,
+ "grad_norm": 0.4469926357269287,
+ "learning_rate": 0.00011363698275661001,
+ "loss": 0.8081,
+ "step": 1368
+ },
+ {
+ "epoch": 1.3693851395705043,
+ "grad_norm": 0.4939117431640625,
+ "learning_rate": 0.00011353312997501313,
+ "loss": 0.9559,
+ "step": 1369
+ },
+ {
+ "epoch": 1.370385420899628,
+ "grad_norm": 0.5091076493263245,
+ "learning_rate": 0.00011342926232295386,
+ "loss": 0.8962,
+ "step": 1370
+ },
+ {
+ "epoch": 1.371385702228752,
+ "grad_norm": 0.48055970668792725,
+ "learning_rate": 0.00011332537991456398,
+ "loss": 0.8686,
+ "step": 1371
+ },
+ {
+ "epoch": 1.3723859835578756,
+ "grad_norm": 0.4724258482456207,
+ "learning_rate": 0.00011322148286399147,
+ "loss": 0.8872,
+ "step": 1372
+ },
+ {
+ "epoch": 1.3733862648869994,
+ "grad_norm": 0.4945514500141144,
+ "learning_rate": 0.0001131175712854004,
+ "loss": 0.8766,
+ "step": 1373
+ },
+ {
+ "epoch": 1.3743865462161233,
+ "grad_norm": 0.4784204065799713,
+ "learning_rate": 0.00011301364529297079,
+ "loss": 0.8216,
+ "step": 1374
+ },
+ {
+ "epoch": 1.3753868275452472,
+ "grad_norm": 0.4669654667377472,
+ "learning_rate": 0.0001129097050008985,
+ "loss": 0.98,
+ "step": 1375
+ },
+ {
+ "epoch": 1.3763871088743709,
+ "grad_norm": 0.5275737047195435,
+ "learning_rate": 0.00011280575052339514,
+ "loss": 0.9391,
+ "step": 1376
+ },
+ {
+ "epoch": 1.3773873902034948,
+ "grad_norm": 0.47577112913131714,
+ "learning_rate": 0.00011270178197468789,
+ "loss": 0.8956,
+ "step": 1377
+ },
+ {
+ "epoch": 1.3783876715326184,
+ "grad_norm": 0.49086448550224304,
+ "learning_rate": 0.00011259779946901934,
+ "loss": 1.0058,
+ "step": 1378
+ },
+ {
+ "epoch": 1.3793879528617423,
+ "grad_norm": 0.5351247191429138,
+ "learning_rate": 0.0001124938031206475,
+ "loss": 1.0215,
+ "step": 1379
+ },
+ {
+ "epoch": 1.3803882341908662,
+ "grad_norm": 0.5512630343437195,
+ "learning_rate": 0.00011238979304384554,
+ "loss": 1.0254,
+ "step": 1380
+ },
+ {
+ "epoch": 1.38138851551999,
+ "grad_norm": 0.5598354339599609,
+ "learning_rate": 0.0001122857693529017,
+ "loss": 0.8707,
+ "step": 1381
+ },
+ {
+ "epoch": 1.3823887968491138,
+ "grad_norm": 0.5506719946861267,
+ "learning_rate": 0.0001121817321621192,
+ "loss": 0.9061,
+ "step": 1382
+ },
+ {
+ "epoch": 1.3833890781782376,
+ "grad_norm": 0.5244742035865784,
+ "learning_rate": 0.00011207768158581613,
+ "loss": 1.0017,
+ "step": 1383
+ },
+ {
+ "epoch": 1.3843893595073615,
+ "grad_norm": 0.480194091796875,
+ "learning_rate": 0.00011197361773832525,
+ "loss": 0.8132,
+ "step": 1384
+ },
+ {
+ "epoch": 1.3853896408364852,
+ "grad_norm": 0.5409587025642395,
+ "learning_rate": 0.00011186954073399387,
+ "loss": 1.0724,
+ "step": 1385
+ },
+ {
+ "epoch": 1.386389922165609,
+ "grad_norm": 0.5776751041412354,
+ "learning_rate": 0.00011176545068718385,
+ "loss": 0.9577,
+ "step": 1386
+ },
+ {
+ "epoch": 1.387390203494733,
+ "grad_norm": 0.4478171765804291,
+ "learning_rate": 0.0001116613477122713,
+ "loss": 0.7698,
+ "step": 1387
+ },
+ {
+ "epoch": 1.3883904848238566,
+ "grad_norm": 0.5580281615257263,
+ "learning_rate": 0.00011155723192364658,
+ "loss": 1.0065,
+ "step": 1388
+ },
+ {
+ "epoch": 1.3893907661529805,
+ "grad_norm": 0.5318020582199097,
+ "learning_rate": 0.00011145310343571411,
+ "loss": 0.9155,
+ "step": 1389
+ },
+ {
+ "epoch": 1.3903910474821044,
+ "grad_norm": 0.45960649847984314,
+ "learning_rate": 0.00011134896236289224,
+ "loss": 0.848,
+ "step": 1390
+ },
+ {
+ "epoch": 1.391391328811228,
+ "grad_norm": 0.49986693263053894,
+ "learning_rate": 0.0001112448088196132,
+ "loss": 1.0222,
+ "step": 1391
+ },
+ {
+ "epoch": 1.392391610140352,
+ "grad_norm": 0.6470636129379272,
+ "learning_rate": 0.00011114064292032282,
+ "loss": 0.8976,
+ "step": 1392
+ },
+ {
+ "epoch": 1.3933918914694758,
+ "grad_norm": 0.49885210394859314,
+ "learning_rate": 0.0001110364647794807,
+ "loss": 0.8872,
+ "step": 1393
+ },
+ {
+ "epoch": 1.3943921727985997,
+ "grad_norm": 0.48183003067970276,
+ "learning_rate": 0.00011093227451155974,
+ "loss": 0.7506,
+ "step": 1394
+ },
+ {
+ "epoch": 1.3953924541277234,
+ "grad_norm": 0.47776031494140625,
+ "learning_rate": 0.0001108280722310462,
+ "loss": 0.9945,
+ "step": 1395
+ },
+ {
+ "epoch": 1.3963927354568473,
+ "grad_norm": 0.5032552480697632,
+ "learning_rate": 0.0001107238580524395,
+ "loss": 0.9844,
+ "step": 1396
+ },
+ {
+ "epoch": 1.397393016785971,
+ "grad_norm": 0.5641827583312988,
+ "learning_rate": 0.00011061963209025223,
+ "loss": 0.9862,
+ "step": 1397
+ },
+ {
+ "epoch": 1.3983932981150948,
+ "grad_norm": 0.45950955152511597,
+ "learning_rate": 0.00011051539445900983,
+ "loss": 0.9878,
+ "step": 1398
+ },
+ {
+ "epoch": 1.3993935794442187,
+ "grad_norm": 0.48625022172927856,
+ "learning_rate": 0.00011041114527325065,
+ "loss": 0.9446,
+ "step": 1399
+ },
+ {
+ "epoch": 1.4003938607733426,
+ "grad_norm": 0.5851911902427673,
+ "learning_rate": 0.00011030688464752566,
+ "loss": 1.1538,
+ "step": 1400
+ },
+ {
+ "epoch": 1.4013941421024663,
+ "grad_norm": 0.45012837648391724,
+ "learning_rate": 0.00011020261269639842,
+ "loss": 0.8871,
+ "step": 1401
+ },
+ {
+ "epoch": 1.4023944234315902,
+ "grad_norm": 0.4794975221157074,
+ "learning_rate": 0.000110098329534445,
+ "loss": 0.912,
+ "step": 1402
+ },
+ {
+ "epoch": 1.4033947047607138,
+ "grad_norm": 0.5397909879684448,
+ "learning_rate": 0.00010999403527625367,
+ "loss": 1.015,
+ "step": 1403
+ },
+ {
+ "epoch": 1.4043949860898377,
+ "grad_norm": 0.5413039922714233,
+ "learning_rate": 0.00010988973003642499,
+ "loss": 1.0111,
+ "step": 1404
+ },
+ {
+ "epoch": 1.4053952674189616,
+ "grad_norm": 0.48752084374427795,
+ "learning_rate": 0.00010978541392957156,
+ "loss": 0.8649,
+ "step": 1405
+ },
+ {
+ "epoch": 1.4063955487480855,
+ "grad_norm": 0.5576539635658264,
+ "learning_rate": 0.00010968108707031792,
+ "loss": 0.8334,
+ "step": 1406
+ },
+ {
+ "epoch": 1.4073958300772091,
+ "grad_norm": 0.5292769074440002,
+ "learning_rate": 0.00010957674957330042,
+ "loss": 1.0312,
+ "step": 1407
+ },
+ {
+ "epoch": 1.408396111406333,
+ "grad_norm": 0.5971432328224182,
+ "learning_rate": 0.00010947240155316707,
+ "loss": 0.9367,
+ "step": 1408
+ },
+ {
+ "epoch": 1.409396392735457,
+ "grad_norm": 0.5620018839836121,
+ "learning_rate": 0.00010936804312457749,
+ "loss": 0.9493,
+ "step": 1409
+ },
+ {
+ "epoch": 1.4103966740645806,
+ "grad_norm": 0.456496000289917,
+ "learning_rate": 0.00010926367440220276,
+ "loss": 0.8532,
+ "step": 1410
+ },
+ {
+ "epoch": 1.4113969553937045,
+ "grad_norm": 0.47393882274627686,
+ "learning_rate": 0.00010915929550072517,
+ "loss": 0.8073,
+ "step": 1411
+ },
+ {
+ "epoch": 1.4123972367228284,
+ "grad_norm": 0.5321446061134338,
+ "learning_rate": 0.00010905490653483827,
+ "loss": 1.1076,
+ "step": 1412
+ },
+ {
+ "epoch": 1.4133975180519522,
+ "grad_norm": 0.4768468141555786,
+ "learning_rate": 0.00010895050761924668,
+ "loss": 0.9466,
+ "step": 1413
+ },
+ {
+ "epoch": 1.414397799381076,
+ "grad_norm": 0.5629300475120544,
+ "learning_rate": 0.00010884609886866588,
+ "loss": 1.0541,
+ "step": 1414
+ },
+ {
+ "epoch": 1.4153980807101998,
+ "grad_norm": 0.45907631516456604,
+ "learning_rate": 0.00010874168039782227,
+ "loss": 0.9156,
+ "step": 1415
+ },
+ {
+ "epoch": 1.4163983620393235,
+ "grad_norm": 0.5152727961540222,
+ "learning_rate": 0.00010863725232145286,
+ "loss": 1.0495,
+ "step": 1416
+ },
+ {
+ "epoch": 1.4173986433684473,
+ "grad_norm": 0.511647641658783,
+ "learning_rate": 0.00010853281475430517,
+ "loss": 0.7327,
+ "step": 1417
+ },
+ {
+ "epoch": 1.4183989246975712,
+ "grad_norm": 0.6430179476737976,
+ "learning_rate": 0.0001084283678111372,
+ "loss": 0.9831,
+ "step": 1418
+ },
+ {
+ "epoch": 1.4193992060266951,
+ "grad_norm": 0.5592547059059143,
+ "learning_rate": 0.00010832391160671729,
+ "loss": 0.9462,
+ "step": 1419
+ },
+ {
+ "epoch": 1.4203994873558188,
+ "grad_norm": 0.5079266428947449,
+ "learning_rate": 0.00010821944625582392,
+ "loss": 1.0473,
+ "step": 1420
+ },
+ {
+ "epoch": 1.4213997686849427,
+ "grad_norm": 0.5006073713302612,
+ "learning_rate": 0.00010811497187324555,
+ "loss": 0.8077,
+ "step": 1421
+ },
+ {
+ "epoch": 1.4224000500140663,
+ "grad_norm": 0.47260841727256775,
+ "learning_rate": 0.00010801048857378071,
+ "loss": 0.8069,
+ "step": 1422
+ },
+ {
+ "epoch": 1.4234003313431902,
+ "grad_norm": 0.5051037669181824,
+ "learning_rate": 0.00010790599647223763,
+ "loss": 1.0241,
+ "step": 1423
+ },
+ {
+ "epoch": 1.424400612672314,
+ "grad_norm": 0.5116690397262573,
+ "learning_rate": 0.0001078014956834342,
+ "loss": 1.0377,
+ "step": 1424
+ },
+ {
+ "epoch": 1.425400894001438,
+ "grad_norm": 0.48974907398223877,
+ "learning_rate": 0.00010769698632219794,
+ "loss": 1.0578,
+ "step": 1425
+ },
+ {
+ "epoch": 1.4264011753305617,
+ "grad_norm": 0.5071999430656433,
+ "learning_rate": 0.00010759246850336572,
+ "loss": 0.9072,
+ "step": 1426
+ },
+ {
+ "epoch": 1.4274014566596855,
+ "grad_norm": 0.6418463587760925,
+ "learning_rate": 0.0001074879423417837,
+ "loss": 1.1195,
+ "step": 1427
+ },
+ {
+ "epoch": 1.4284017379888094,
+ "grad_norm": 0.4854032099246979,
+ "learning_rate": 0.00010738340795230721,
+ "loss": 1.0776,
+ "step": 1428
+ },
+ {
+ "epoch": 1.429402019317933,
+ "grad_norm": 0.5330777764320374,
+ "learning_rate": 0.00010727886544980068,
+ "loss": 1.0851,
+ "step": 1429
+ },
+ {
+ "epoch": 1.430402300647057,
+ "grad_norm": 0.5281643271446228,
+ "learning_rate": 0.00010717431494913741,
+ "loss": 0.8663,
+ "step": 1430
+ },
+ {
+ "epoch": 1.4314025819761809,
+ "grad_norm": 0.47898662090301514,
+ "learning_rate": 0.00010706975656519946,
+ "loss": 0.9926,
+ "step": 1431
+ },
+ {
+ "epoch": 1.4324028633053048,
+ "grad_norm": 0.43927934765815735,
+ "learning_rate": 0.00010696519041287765,
+ "loss": 0.8698,
+ "step": 1432
+ },
+ {
+ "epoch": 1.4334031446344284,
+ "grad_norm": 0.5207253694534302,
+ "learning_rate": 0.0001068606166070712,
+ "loss": 0.9795,
+ "step": 1433
+ },
+ {
+ "epoch": 1.4344034259635523,
+ "grad_norm": 0.5264511704444885,
+ "learning_rate": 0.00010675603526268785,
+ "loss": 0.9593,
+ "step": 1434
+ },
+ {
+ "epoch": 1.435403707292676,
+ "grad_norm": 0.5435792803764343,
+ "learning_rate": 0.00010665144649464356,
+ "loss": 0.9436,
+ "step": 1435
+ },
+ {
+ "epoch": 1.4364039886217999,
+ "grad_norm": 0.5383104681968689,
+ "learning_rate": 0.00010654685041786249,
+ "loss": 0.9569,
+ "step": 1436
+ },
+ {
+ "epoch": 1.4374042699509237,
+ "grad_norm": 0.48762592673301697,
+ "learning_rate": 0.00010644224714727681,
+ "loss": 0.9235,
+ "step": 1437
+ },
+ {
+ "epoch": 1.4384045512800476,
+ "grad_norm": 0.4815019965171814,
+ "learning_rate": 0.0001063376367978266,
+ "loss": 0.8241,
+ "step": 1438
+ },
+ {
+ "epoch": 1.4394048326091713,
+ "grad_norm": 0.4944337010383606,
+ "learning_rate": 0.00010623301948445971,
+ "loss": 0.9169,
+ "step": 1439
+ },
+ {
+ "epoch": 1.4404051139382952,
+ "grad_norm": 0.5658552646636963,
+ "learning_rate": 0.00010612839532213164,
+ "loss": 1.044,
+ "step": 1440
+ },
+ {
+ "epoch": 1.4414053952674188,
+ "grad_norm": 0.5688045620918274,
+ "learning_rate": 0.00010602376442580544,
+ "loss": 0.9684,
+ "step": 1441
+ },
+ {
+ "epoch": 1.4424056765965427,
+ "grad_norm": 0.5434709787368774,
+ "learning_rate": 0.00010591912691045152,
+ "loss": 0.8741,
+ "step": 1442
+ },
+ {
+ "epoch": 1.4434059579256666,
+ "grad_norm": 0.583562433719635,
+ "learning_rate": 0.00010581448289104758,
+ "loss": 1.1651,
+ "step": 1443
+ },
+ {
+ "epoch": 1.4444062392547905,
+ "grad_norm": 0.566363513469696,
+ "learning_rate": 0.00010570983248257853,
+ "loss": 1.0091,
+ "step": 1444
+ },
+ {
+ "epoch": 1.4454065205839142,
+ "grad_norm": 0.527039647102356,
+ "learning_rate": 0.00010560517580003617,
+ "loss": 1.0666,
+ "step": 1445
+ },
+ {
+ "epoch": 1.446406801913038,
+ "grad_norm": 0.46389803290367126,
+ "learning_rate": 0.00010550051295841931,
+ "loss": 0.9344,
+ "step": 1446
+ },
+ {
+ "epoch": 1.447407083242162,
+ "grad_norm": 0.6291074752807617,
+ "learning_rate": 0.00010539584407273349,
+ "loss": 1.0388,
+ "step": 1447
+ },
+ {
+ "epoch": 1.4484073645712856,
+ "grad_norm": 0.5249356031417847,
+ "learning_rate": 0.00010529116925799085,
+ "loss": 0.97,
+ "step": 1448
+ },
+ {
+ "epoch": 1.4494076459004095,
+ "grad_norm": 0.4662008583545685,
+ "learning_rate": 0.00010518648862921012,
+ "loss": 0.8385,
+ "step": 1449
+ },
+ {
+ "epoch": 1.4504079272295334,
+ "grad_norm": 0.5730600953102112,
+ "learning_rate": 0.00010508180230141635,
+ "loss": 0.8747,
+ "step": 1450
+ },
+ {
+ "epoch": 1.451408208558657,
+ "grad_norm": 0.48082512617111206,
+ "learning_rate": 0.00010497711038964086,
+ "loss": 0.8624,
+ "step": 1451
+ },
+ {
+ "epoch": 1.452408489887781,
+ "grad_norm": 0.48900333046913147,
+ "learning_rate": 0.0001048724130089212,
+ "loss": 0.7826,
+ "step": 1452
+ },
+ {
+ "epoch": 1.4534087712169048,
+ "grad_norm": 0.4998112618923187,
+ "learning_rate": 0.00010476771027430086,
+ "loss": 0.8687,
+ "step": 1453
+ },
+ {
+ "epoch": 1.4544090525460285,
+ "grad_norm": 0.4872112572193146,
+ "learning_rate": 0.00010466300230082911,
+ "loss": 0.9185,
+ "step": 1454
+ },
+ {
+ "epoch": 1.4554093338751524,
+ "grad_norm": 0.5405575633049011,
+ "learning_rate": 0.00010455828920356115,
+ "loss": 0.9601,
+ "step": 1455
+ },
+ {
+ "epoch": 1.4564096152042763,
+ "grad_norm": 0.4496804475784302,
+ "learning_rate": 0.00010445357109755771,
+ "loss": 0.8606,
+ "step": 1456
+ },
+ {
+ "epoch": 1.4574098965334001,
+ "grad_norm": 0.49340635538101196,
+ "learning_rate": 0.00010434884809788508,
+ "loss": 1.1009,
+ "step": 1457
+ },
+ {
+ "epoch": 1.4584101778625238,
+ "grad_norm": 0.4692990481853485,
+ "learning_rate": 0.00010424412031961484,
+ "loss": 0.8011,
+ "step": 1458
+ },
+ {
+ "epoch": 1.4594104591916477,
+ "grad_norm": 0.5027800798416138,
+ "learning_rate": 0.00010413938787782394,
+ "loss": 0.8827,
+ "step": 1459
+ },
+ {
+ "epoch": 1.4604107405207714,
+ "grad_norm": 0.6764587163925171,
+ "learning_rate": 0.00010403465088759437,
+ "loss": 0.8513,
+ "step": 1460
+ },
+ {
+ "epoch": 1.4614110218498952,
+ "grad_norm": 0.558620035648346,
+ "learning_rate": 0.00010392990946401313,
+ "loss": 0.9881,
+ "step": 1461
+ },
+ {
+ "epoch": 1.4624113031790191,
+ "grad_norm": 0.603817343711853,
+ "learning_rate": 0.00010382516372217215,
+ "loss": 0.9869,
+ "step": 1462
+ },
+ {
+ "epoch": 1.463411584508143,
+ "grad_norm": 0.4486953020095825,
+ "learning_rate": 0.000103720413777168,
+ "loss": 0.8933,
+ "step": 1463
+ },
+ {
+ "epoch": 1.4644118658372667,
+ "grad_norm": 0.5756564736366272,
+ "learning_rate": 0.00010361565974410192,
+ "loss": 0.9974,
+ "step": 1464
+ },
+ {
+ "epoch": 1.4654121471663906,
+ "grad_norm": 0.4386444389820099,
+ "learning_rate": 0.00010351090173807969,
+ "loss": 0.8577,
+ "step": 1465
+ },
+ {
+ "epoch": 1.4664124284955142,
+ "grad_norm": 0.5308933258056641,
+ "learning_rate": 0.00010340613987421137,
+ "loss": 1.0539,
+ "step": 1466
+ },
+ {
+ "epoch": 1.4674127098246381,
+ "grad_norm": 0.6070798635482788,
+ "learning_rate": 0.00010330137426761135,
+ "loss": 0.9111,
+ "step": 1467
+ },
+ {
+ "epoch": 1.468412991153762,
+ "grad_norm": 0.5870214700698853,
+ "learning_rate": 0.00010319660503339808,
+ "loss": 0.9958,
+ "step": 1468
+ },
+ {
+ "epoch": 1.469413272482886,
+ "grad_norm": 0.5014438629150391,
+ "learning_rate": 0.00010309183228669397,
+ "loss": 0.987,
+ "step": 1469
+ },
+ {
+ "epoch": 1.4704135538120096,
+ "grad_norm": 0.47051525115966797,
+ "learning_rate": 0.00010298705614262532,
+ "loss": 1.0899,
+ "step": 1470
+ },
+ {
+ "epoch": 1.4714138351411334,
+ "grad_norm": 0.5500984787940979,
+ "learning_rate": 0.0001028822767163222,
+ "loss": 0.8882,
+ "step": 1471
+ },
+ {
+ "epoch": 1.4724141164702573,
+ "grad_norm": 0.4973205626010895,
+ "learning_rate": 0.00010277749412291824,
+ "loss": 0.9374,
+ "step": 1472
+ },
+ {
+ "epoch": 1.473414397799381,
+ "grad_norm": 0.4927331209182739,
+ "learning_rate": 0.00010267270847755048,
+ "loss": 0.9608,
+ "step": 1473
+ },
+ {
+ "epoch": 1.4744146791285049,
+ "grad_norm": 0.5539640188217163,
+ "learning_rate": 0.00010256791989535952,
+ "loss": 0.9339,
+ "step": 1474
+ },
+ {
+ "epoch": 1.4754149604576288,
+ "grad_norm": 0.48375800251960754,
+ "learning_rate": 0.00010246312849148899,
+ "loss": 0.8778,
+ "step": 1475
+ },
+ {
+ "epoch": 1.4764152417867527,
+ "grad_norm": 0.522544264793396,
+ "learning_rate": 0.00010235833438108571,
+ "loss": 0.9633,
+ "step": 1476
+ },
+ {
+ "epoch": 1.4774155231158763,
+ "grad_norm": 0.5747688412666321,
+ "learning_rate": 0.00010225353767929944,
+ "loss": 1.0206,
+ "step": 1477
+ },
+ {
+ "epoch": 1.4784158044450002,
+ "grad_norm": 0.4539598226547241,
+ "learning_rate": 0.00010214873850128282,
+ "loss": 0.7895,
+ "step": 1478
+ },
+ {
+ "epoch": 1.4794160857741239,
+ "grad_norm": 0.4290696978569031,
+ "learning_rate": 0.00010204393696219117,
+ "loss": 0.8718,
+ "step": 1479
+ },
+ {
+ "epoch": 1.4804163671032478,
+ "grad_norm": 0.43560928106307983,
+ "learning_rate": 0.00010193913317718244,
+ "loss": 0.8839,
+ "step": 1480
+ },
+ {
+ "epoch": 1.4814166484323716,
+ "grad_norm": 0.4937680661678314,
+ "learning_rate": 0.00010183432726141706,
+ "loss": 0.9615,
+ "step": 1481
+ },
+ {
+ "epoch": 1.4824169297614955,
+ "grad_norm": 0.5631589889526367,
+ "learning_rate": 0.00010172951933005775,
+ "loss": 1.0691,
+ "step": 1482
+ },
+ {
+ "epoch": 1.4834172110906192,
+ "grad_norm": 0.5049973726272583,
+ "learning_rate": 0.00010162470949826948,
+ "loss": 0.9107,
+ "step": 1483
+ },
+ {
+ "epoch": 1.484417492419743,
+ "grad_norm": 0.5362145304679871,
+ "learning_rate": 0.0001015198978812193,
+ "loss": 0.9762,
+ "step": 1484
+ },
+ {
+ "epoch": 1.4854177737488667,
+ "grad_norm": 0.4824192225933075,
+ "learning_rate": 0.00010141508459407623,
+ "loss": 0.8844,
+ "step": 1485
+ },
+ {
+ "epoch": 1.4864180550779906,
+ "grad_norm": 0.5116665959358215,
+ "learning_rate": 0.0001013102697520111,
+ "loss": 0.9461,
+ "step": 1486
+ },
+ {
+ "epoch": 1.4874183364071145,
+ "grad_norm": 0.5244630575180054,
+ "learning_rate": 0.00010120545347019647,
+ "loss": 1.0286,
+ "step": 1487
+ },
+ {
+ "epoch": 1.4884186177362384,
+ "grad_norm": 0.5252584218978882,
+ "learning_rate": 0.00010110063586380646,
+ "loss": 1.1083,
+ "step": 1488
+ },
+ {
+ "epoch": 1.489418899065362,
+ "grad_norm": 0.4909230172634125,
+ "learning_rate": 0.00010099581704801673,
+ "loss": 0.9338,
+ "step": 1489
+ },
+ {
+ "epoch": 1.490419180394486,
+ "grad_norm": 0.5618056654930115,
+ "learning_rate": 0.00010089099713800414,
+ "loss": 1.0513,
+ "step": 1490
+ },
+ {
+ "epoch": 1.4914194617236098,
+ "grad_norm": 0.48737892508506775,
+ "learning_rate": 0.00010078617624894684,
+ "loss": 0.8669,
+ "step": 1491
+ },
+ {
+ "epoch": 1.4924197430527335,
+ "grad_norm": 0.411451131105423,
+ "learning_rate": 0.000100681354496024,
+ "loss": 0.881,
+ "step": 1492
+ },
+ {
+ "epoch": 1.4934200243818574,
+ "grad_norm": 0.5821709632873535,
+ "learning_rate": 0.00010057653199441581,
+ "loss": 0.9359,
+ "step": 1493
+ },
+ {
+ "epoch": 1.4944203057109813,
+ "grad_norm": 0.4621860086917877,
+ "learning_rate": 0.00010047170885930324,
+ "loss": 0.8121,
+ "step": 1494
+ },
+ {
+ "epoch": 1.4954205870401052,
+ "grad_norm": 0.4658668339252472,
+ "learning_rate": 0.00010036688520586788,
+ "loss": 0.9806,
+ "step": 1495
+ },
+ {
+ "epoch": 1.4964208683692288,
+ "grad_norm": 0.49816030263900757,
+ "learning_rate": 0.00010026206114929209,
+ "loss": 0.9124,
+ "step": 1496
+ },
+ {
+ "epoch": 1.4974211496983527,
+ "grad_norm": 0.5228123068809509,
+ "learning_rate": 0.00010015723680475846,
+ "loss": 1.0132,
+ "step": 1497
+ },
+ {
+ "epoch": 1.4984214310274764,
+ "grad_norm": 0.4727514982223511,
+ "learning_rate": 0.00010005241228745004,
+ "loss": 0.8418,
+ "step": 1498
+ },
+ {
+ "epoch": 1.4994217123566003,
+ "grad_norm": 0.528904914855957,
+ "learning_rate": 9.994758771254997e-05,
+ "loss": 0.9702,
+ "step": 1499
+ },
+ {
+ "epoch": 1.5004219936857242,
+ "grad_norm": 0.5090524554252625,
+ "learning_rate": 9.984276319524154e-05,
+ "loss": 0.9927,
+ "step": 1500
+ },
+ {
+ "epoch": 1.501422275014848,
+ "grad_norm": 0.4553126096725464,
+ "learning_rate": 9.973793885070792e-05,
+ "loss": 0.9075,
+ "step": 1501
+ },
+ {
+ "epoch": 1.5024225563439717,
+ "grad_norm": 0.4887089133262634,
+ "learning_rate": 9.963311479413211e-05,
+ "loss": 0.9999,
+ "step": 1502
+ },
+ {
+ "epoch": 1.5034228376730956,
+ "grad_norm": 0.48520341515541077,
+ "learning_rate": 9.95282911406968e-05,
+ "loss": 1.0182,
+ "step": 1503
+ },
+ {
+ "epoch": 1.5044231190022193,
+ "grad_norm": 0.5554280877113342,
+ "learning_rate": 9.942346800558421e-05,
+ "loss": 0.9456,
+ "step": 1504
+ },
+ {
+ "epoch": 1.5054234003313431,
+ "grad_norm": 0.5199026465415955,
+ "learning_rate": 9.931864550397601e-05,
+ "loss": 1.0141,
+ "step": 1505
+ },
+ {
+ "epoch": 1.506423681660467,
+ "grad_norm": 0.5191763043403625,
+ "learning_rate": 9.921382375105318e-05,
+ "loss": 0.937,
+ "step": 1506
+ },
+ {
+ "epoch": 1.507423962989591,
+ "grad_norm": 0.5416325330734253,
+ "learning_rate": 9.910900286199587e-05,
+ "loss": 1.07,
+ "step": 1507
+ },
+ {
+ "epoch": 1.5084242443187148,
+ "grad_norm": 0.5193303227424622,
+ "learning_rate": 9.900418295198328e-05,
+ "loss": 0.9386,
+ "step": 1508
+ },
+ {
+ "epoch": 1.5094245256478385,
+ "grad_norm": 0.5433129072189331,
+ "learning_rate": 9.889936413619356e-05,
+ "loss": 0.8967,
+ "step": 1509
+ },
+ {
+ "epoch": 1.5104248069769621,
+ "grad_norm": 0.526980459690094,
+ "learning_rate": 9.879454652980358e-05,
+ "loss": 1.1135,
+ "step": 1510
+ },
+ {
+ "epoch": 1.511425088306086,
+ "grad_norm": 0.4468344449996948,
+ "learning_rate": 9.868973024798895e-05,
+ "loss": 0.9408,
+ "step": 1511
+ },
+ {
+ "epoch": 1.51242536963521,
+ "grad_norm": 0.5974569320678711,
+ "learning_rate": 9.858491540592382e-05,
+ "loss": 0.9747,
+ "step": 1512
+ },
+ {
+ "epoch": 1.5134256509643338,
+ "grad_norm": 0.5186171531677246,
+ "learning_rate": 9.848010211878074e-05,
+ "loss": 1.1012,
+ "step": 1513
+ },
+ {
+ "epoch": 1.5144259322934577,
+ "grad_norm": 0.5307335257530212,
+ "learning_rate": 9.837529050173052e-05,
+ "loss": 0.9548,
+ "step": 1514
+ },
+ {
+ "epoch": 1.5154262136225813,
+ "grad_norm": 0.469865083694458,
+ "learning_rate": 9.827048066994225e-05,
+ "loss": 0.8556,
+ "step": 1515
+ },
+ {
+ "epoch": 1.516426494951705,
+ "grad_norm": 0.4164840877056122,
+ "learning_rate": 9.816567273858296e-05,
+ "loss": 0.7429,
+ "step": 1516
+ },
+ {
+ "epoch": 1.517426776280829,
+ "grad_norm": 0.5811400413513184,
+ "learning_rate": 9.806086682281758e-05,
+ "loss": 1.066,
+ "step": 1517
+ },
+ {
+ "epoch": 1.5184270576099528,
+ "grad_norm": 0.4634648263454437,
+ "learning_rate": 9.795606303780885e-05,
+ "loss": 1.0048,
+ "step": 1518
+ },
+ {
+ "epoch": 1.5194273389390767,
+ "grad_norm": 0.45642492175102234,
+ "learning_rate": 9.785126149871722e-05,
+ "loss": 0.8776,
+ "step": 1519
+ },
+ {
+ "epoch": 1.5204276202682006,
+ "grad_norm": 0.5217366218566895,
+ "learning_rate": 9.77464623207006e-05,
+ "loss": 0.9806,
+ "step": 1520
+ },
+ {
+ "epoch": 1.5214279015973242,
+ "grad_norm": 0.4867999851703644,
+ "learning_rate": 9.764166561891432e-05,
+ "loss": 0.9539,
+ "step": 1521
+ },
+ {
+ "epoch": 1.522428182926448,
+ "grad_norm": 0.5579104423522949,
+ "learning_rate": 9.753687150851102e-05,
+ "loss": 1.0812,
+ "step": 1522
+ },
+ {
+ "epoch": 1.5234284642555718,
+ "grad_norm": 0.5152975916862488,
+ "learning_rate": 9.74320801046405e-05,
+ "loss": 0.8958,
+ "step": 1523
+ },
+ {
+ "epoch": 1.5244287455846957,
+ "grad_norm": 0.5229570269584656,
+ "learning_rate": 9.732729152244953e-05,
+ "loss": 1.1053,
+ "step": 1524
+ },
+ {
+ "epoch": 1.5254290269138195,
+ "grad_norm": 0.49501264095306396,
+ "learning_rate": 9.722250587708181e-05,
+ "loss": 0.8045,
+ "step": 1525
+ },
+ {
+ "epoch": 1.5264293082429434,
+ "grad_norm": 0.5376133918762207,
+ "learning_rate": 9.711772328367784e-05,
+ "loss": 1.0366,
+ "step": 1526
+ },
+ {
+ "epoch": 1.527429589572067,
+ "grad_norm": 0.5039237141609192,
+ "learning_rate": 9.70129438573747e-05,
+ "loss": 0.9531,
+ "step": 1527
+ },
+ {
+ "epoch": 1.528429870901191,
+ "grad_norm": 0.483420729637146,
+ "learning_rate": 9.690816771330608e-05,
+ "loss": 0.8635,
+ "step": 1528
+ },
+ {
+ "epoch": 1.5294301522303146,
+ "grad_norm": 0.5216282606124878,
+ "learning_rate": 9.680339496660192e-05,
+ "loss": 0.8885,
+ "step": 1529
+ },
+ {
+ "epoch": 1.5304304335594385,
+ "grad_norm": 0.4887123703956604,
+ "learning_rate": 9.669862573238863e-05,
+ "loss": 1.01,
+ "step": 1530
+ },
+ {
+ "epoch": 1.5314307148885624,
+ "grad_norm": 0.5213040113449097,
+ "learning_rate": 9.659386012578863e-05,
+ "loss": 0.8264,
+ "step": 1531
+ },
+ {
+ "epoch": 1.5324309962176863,
+ "grad_norm": 0.45882460474967957,
+ "learning_rate": 9.648909826192033e-05,
+ "loss": 0.9247,
+ "step": 1532
+ },
+ {
+ "epoch": 1.5334312775468102,
+ "grad_norm": 0.4360674023628235,
+ "learning_rate": 9.63843402558981e-05,
+ "loss": 0.9197,
+ "step": 1533
+ },
+ {
+ "epoch": 1.5344315588759339,
+ "grad_norm": 0.5070340633392334,
+ "learning_rate": 9.627958622283203e-05,
+ "loss": 0.9523,
+ "step": 1534
+ },
+ {
+ "epoch": 1.5354318402050575,
+ "grad_norm": 0.5255693197250366,
+ "learning_rate": 9.617483627782788e-05,
+ "loss": 1.1249,
+ "step": 1535
+ },
+ {
+ "epoch": 1.5364321215341814,
+ "grad_norm": 0.5451697707176208,
+ "learning_rate": 9.607009053598689e-05,
+ "loss": 1.0246,
+ "step": 1536
+ },
+ {
+ "epoch": 1.5374324028633053,
+ "grad_norm": 0.4846939742565155,
+ "learning_rate": 9.596534911240566e-05,
+ "loss": 0.8665,
+ "step": 1537
+ },
+ {
+ "epoch": 1.5384326841924292,
+ "grad_norm": 0.4528220295906067,
+ "learning_rate": 9.58606121221761e-05,
+ "loss": 0.9338,
+ "step": 1538
+ },
+ {
+ "epoch": 1.539432965521553,
+ "grad_norm": 0.4627808630466461,
+ "learning_rate": 9.57558796803852e-05,
+ "loss": 0.8086,
+ "step": 1539
+ },
+ {
+ "epoch": 1.5404332468506767,
+ "grad_norm": 0.47025686502456665,
+ "learning_rate": 9.565115190211497e-05,
+ "loss": 0.8745,
+ "step": 1540
+ },
+ {
+ "epoch": 1.5414335281798006,
+ "grad_norm": 0.5646499395370483,
+ "learning_rate": 9.554642890244233e-05,
+ "loss": 1.0445,
+ "step": 1541
+ },
+ {
+ "epoch": 1.5424338095089243,
+ "grad_norm": 0.48776212334632874,
+ "learning_rate": 9.54417107964389e-05,
+ "loss": 0.9189,
+ "step": 1542
+ },
+ {
+ "epoch": 1.5434340908380482,
+ "grad_norm": 0.4854126274585724,
+ "learning_rate": 9.533699769917092e-05,
+ "loss": 0.9359,
+ "step": 1543
+ },
+ {
+ "epoch": 1.544434372167172,
+ "grad_norm": 0.4896346926689148,
+ "learning_rate": 9.523228972569917e-05,
+ "loss": 0.8201,
+ "step": 1544
+ },
+ {
+ "epoch": 1.545434653496296,
+ "grad_norm": 0.5236535668373108,
+ "learning_rate": 9.512758699107879e-05,
+ "loss": 0.9501,
+ "step": 1545
+ },
+ {
+ "epoch": 1.5464349348254196,
+ "grad_norm": 0.607430636882782,
+ "learning_rate": 9.502288961035912e-05,
+ "loss": 0.8468,
+ "step": 1546
+ },
+ {
+ "epoch": 1.5474352161545435,
+ "grad_norm": 0.46944427490234375,
+ "learning_rate": 9.491819769858366e-05,
+ "loss": 0.8697,
+ "step": 1547
+ },
+ {
+ "epoch": 1.5484354974836672,
+ "grad_norm": 0.44860196113586426,
+ "learning_rate": 9.48135113707899e-05,
+ "loss": 0.9398,
+ "step": 1548
+ },
+ {
+ "epoch": 1.549435778812791,
+ "grad_norm": 0.45095279812812805,
+ "learning_rate": 9.470883074200916e-05,
+ "loss": 0.7818,
+ "step": 1549
+ },
+ {
+ "epoch": 1.550436060141915,
+ "grad_norm": 0.519603967666626,
+ "learning_rate": 9.460415592726653e-05,
+ "loss": 0.8663,
+ "step": 1550
+ },
+ {
+ "epoch": 1.5514363414710388,
+ "grad_norm": 0.4833553731441498,
+ "learning_rate": 9.449948704158071e-05,
+ "loss": 0.958,
+ "step": 1551
+ },
+ {
+ "epoch": 1.5524366228001627,
+ "grad_norm": 0.504408597946167,
+ "learning_rate": 9.439482419996384e-05,
+ "loss": 0.8795,
+ "step": 1552
+ },
+ {
+ "epoch": 1.5534369041292864,
+ "grad_norm": 0.45152923464775085,
+ "learning_rate": 9.42901675174215e-05,
+ "loss": 0.8427,
+ "step": 1553
+ },
+ {
+ "epoch": 1.55443718545841,
+ "grad_norm": 0.48051750659942627,
+ "learning_rate": 9.418551710895243e-05,
+ "loss": 0.8997,
+ "step": 1554
+ },
+ {
+ "epoch": 1.555437466787534,
+ "grad_norm": 0.41671374440193176,
+ "learning_rate": 9.408087308954853e-05,
+ "loss": 0.7823,
+ "step": 1555
+ },
+ {
+ "epoch": 1.5564377481166578,
+ "grad_norm": 0.4859127402305603,
+ "learning_rate": 9.397623557419461e-05,
+ "loss": 0.8865,
+ "step": 1556
+ },
+ {
+ "epoch": 1.5574380294457817,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 9.38716046778684e-05,
+ "loss": 0.8464,
+ "step": 1557
+ },
+ {
+ "epoch": 1.5584383107749056,
+ "grad_norm": 0.4976697564125061,
+ "learning_rate": 9.37669805155403e-05,
+ "loss": 0.948,
+ "step": 1558
+ },
+ {
+ "epoch": 1.5594385921040292,
+ "grad_norm": 0.5431742668151855,
+ "learning_rate": 9.366236320217339e-05,
+ "loss": 1.1718,
+ "step": 1559
+ },
+ {
+ "epoch": 1.5604388734331531,
+ "grad_norm": 0.49732932448387146,
+ "learning_rate": 9.355775285272318e-05,
+ "loss": 0.939,
+ "step": 1560
+ },
+ {
+ "epoch": 1.5614391547622768,
+ "grad_norm": 0.4857761859893799,
+ "learning_rate": 9.34531495821375e-05,
+ "loss": 0.9269,
+ "step": 1561
+ },
+ {
+ "epoch": 1.5624394360914007,
+ "grad_norm": 0.47211897373199463,
+ "learning_rate": 9.334855350535645e-05,
+ "loss": 1.0069,
+ "step": 1562
+ },
+ {
+ "epoch": 1.5634397174205246,
+ "grad_norm": 0.4433748126029968,
+ "learning_rate": 9.324396473731217e-05,
+ "loss": 0.866,
+ "step": 1563
+ },
+ {
+ "epoch": 1.5644399987496485,
+ "grad_norm": 0.5030574798583984,
+ "learning_rate": 9.313938339292883e-05,
+ "loss": 0.7763,
+ "step": 1564
+ },
+ {
+ "epoch": 1.5654402800787721,
+ "grad_norm": 0.46466779708862305,
+ "learning_rate": 9.303480958712239e-05,
+ "loss": 0.9033,
+ "step": 1565
+ },
+ {
+ "epoch": 1.566440561407896,
+ "grad_norm": 0.39663952589035034,
+ "learning_rate": 9.293024343480055e-05,
+ "loss": 0.7205,
+ "step": 1566
+ },
+ {
+ "epoch": 1.5674408427370197,
+ "grad_norm": 0.5455542206764221,
+ "learning_rate": 9.282568505086261e-05,
+ "loss": 0.8864,
+ "step": 1567
+ },
+ {
+ "epoch": 1.5684411240661436,
+ "grad_norm": 0.5139548778533936,
+ "learning_rate": 9.272113455019935e-05,
+ "loss": 0.9822,
+ "step": 1568
+ },
+ {
+ "epoch": 1.5694414053952674,
+ "grad_norm": 0.46824902296066284,
+ "learning_rate": 9.261659204769284e-05,
+ "loss": 0.8348,
+ "step": 1569
+ },
+ {
+ "epoch": 1.5704416867243913,
+ "grad_norm": 0.5223984122276306,
+ "learning_rate": 9.251205765821636e-05,
+ "loss": 0.9696,
+ "step": 1570
+ },
+ {
+ "epoch": 1.5714419680535152,
+ "grad_norm": 0.6279047727584839,
+ "learning_rate": 9.240753149663433e-05,
+ "loss": 1.009,
+ "step": 1571
+ },
+ {
+ "epoch": 1.5724422493826389,
+ "grad_norm": 0.49068430066108704,
+ "learning_rate": 9.230301367780208e-05,
+ "loss": 0.9984,
+ "step": 1572
+ },
+ {
+ "epoch": 1.5734425307117625,
+ "grad_norm": 0.4828907251358032,
+ "learning_rate": 9.219850431656579e-05,
+ "loss": 0.8535,
+ "step": 1573
+ },
+ {
+ "epoch": 1.5744428120408864,
+ "grad_norm": 0.4925834834575653,
+ "learning_rate": 9.209400352776237e-05,
+ "loss": 0.8849,
+ "step": 1574
+ },
+ {
+ "epoch": 1.5754430933700103,
+ "grad_norm": 0.5048914551734924,
+ "learning_rate": 9.198951142621929e-05,
+ "loss": 0.8767,
+ "step": 1575
+ },
+ {
+ "epoch": 1.5764433746991342,
+ "grad_norm": 0.44887635111808777,
+ "learning_rate": 9.188502812675446e-05,
+ "loss": 0.8687,
+ "step": 1576
+ },
+ {
+ "epoch": 1.577443656028258,
+ "grad_norm": 0.4909934401512146,
+ "learning_rate": 9.178055374417612e-05,
+ "loss": 0.8362,
+ "step": 1577
+ },
+ {
+ "epoch": 1.5784439373573818,
+ "grad_norm": 0.45031628012657166,
+ "learning_rate": 9.167608839328272e-05,
+ "loss": 0.902,
+ "step": 1578
+ },
+ {
+ "epoch": 1.5794442186865054,
+ "grad_norm": 0.5682864189147949,
+ "learning_rate": 9.15716321888628e-05,
+ "loss": 1.0558,
+ "step": 1579
+ },
+ {
+ "epoch": 1.5804445000156293,
+ "grad_norm": 0.4406115412712097,
+ "learning_rate": 9.146718524569487e-05,
+ "loss": 0.8283,
+ "step": 1580
+ },
+ {
+ "epoch": 1.5814447813447532,
+ "grad_norm": 0.4749000072479248,
+ "learning_rate": 9.136274767854716e-05,
+ "loss": 0.9342,
+ "step": 1581
+ },
+ {
+ "epoch": 1.582445062673877,
+ "grad_norm": 0.4785940945148468,
+ "learning_rate": 9.125831960217774e-05,
+ "loss": 0.9208,
+ "step": 1582
+ },
+ {
+ "epoch": 1.583445344003001,
+ "grad_norm": 0.572299599647522,
+ "learning_rate": 9.115390113133414e-05,
+ "loss": 0.8469,
+ "step": 1583
+ },
+ {
+ "epoch": 1.5844456253321246,
+ "grad_norm": 0.4829537570476532,
+ "learning_rate": 9.104949238075336e-05,
+ "loss": 0.9471,
+ "step": 1584
+ },
+ {
+ "epoch": 1.5854459066612485,
+ "grad_norm": 0.5315890908241272,
+ "learning_rate": 9.094509346516178e-05,
+ "loss": 0.9663,
+ "step": 1585
+ },
+ {
+ "epoch": 1.5864461879903722,
+ "grad_norm": 0.4654553532600403,
+ "learning_rate": 9.084070449927488e-05,
+ "loss": 0.7776,
+ "step": 1586
+ },
+ {
+ "epoch": 1.587446469319496,
+ "grad_norm": 0.5083040595054626,
+ "learning_rate": 9.07363255977973e-05,
+ "loss": 0.8438,
+ "step": 1587
+ },
+ {
+ "epoch": 1.58844675064862,
+ "grad_norm": 0.502129077911377,
+ "learning_rate": 9.063195687542249e-05,
+ "loss": 0.8481,
+ "step": 1588
+ },
+ {
+ "epoch": 1.5894470319777438,
+ "grad_norm": 0.517439067363739,
+ "learning_rate": 9.052759844683295e-05,
+ "loss": 0.9054,
+ "step": 1589
+ },
+ {
+ "epoch": 1.5904473133068675,
+ "grad_norm": 0.4777907431125641,
+ "learning_rate": 9.042325042669961e-05,
+ "loss": 0.9888,
+ "step": 1590
+ },
+ {
+ "epoch": 1.5914475946359914,
+ "grad_norm": 0.41228219866752625,
+ "learning_rate": 9.03189129296821e-05,
+ "loss": 0.5767,
+ "step": 1591
+ },
+ {
+ "epoch": 1.592447875965115,
+ "grad_norm": 0.45188775658607483,
+ "learning_rate": 9.021458607042845e-05,
+ "loss": 0.875,
+ "step": 1592
+ },
+ {
+ "epoch": 1.593448157294239,
+ "grad_norm": 0.46999362111091614,
+ "learning_rate": 9.011026996357503e-05,
+ "loss": 0.8739,
+ "step": 1593
+ },
+ {
+ "epoch": 1.5944484386233628,
+ "grad_norm": 0.5621476173400879,
+ "learning_rate": 9.000596472374637e-05,
+ "loss": 0.8978,
+ "step": 1594
+ },
+ {
+ "epoch": 1.5954487199524867,
+ "grad_norm": 0.4524415135383606,
+ "learning_rate": 8.990167046555504e-05,
+ "loss": 0.7987,
+ "step": 1595
+ },
+ {
+ "epoch": 1.5964490012816106,
+ "grad_norm": 0.42351627349853516,
+ "learning_rate": 8.97973873036016e-05,
+ "loss": 0.8705,
+ "step": 1596
+ },
+ {
+ "epoch": 1.5974492826107343,
+ "grad_norm": 0.45115014910697937,
+ "learning_rate": 8.969311535247438e-05,
+ "loss": 0.9235,
+ "step": 1597
+ },
+ {
+ "epoch": 1.598449563939858,
+ "grad_norm": 0.5297085642814636,
+ "learning_rate": 8.958885472674939e-05,
+ "loss": 0.9363,
+ "step": 1598
+ },
+ {
+ "epoch": 1.5994498452689818,
+ "grad_norm": 0.5296758413314819,
+ "learning_rate": 8.948460554099018e-05,
+ "loss": 0.9461,
+ "step": 1599
+ },
+ {
+ "epoch": 1.6004501265981057,
+ "grad_norm": 0.4951537251472473,
+ "learning_rate": 8.93803679097478e-05,
+ "loss": 0.9494,
+ "step": 1600
+ },
+ {
+ "epoch": 1.6014504079272296,
+ "grad_norm": 0.5380229949951172,
+ "learning_rate": 8.927614194756052e-05,
+ "loss": 0.8813,
+ "step": 1601
+ },
+ {
+ "epoch": 1.6024506892563535,
+ "grad_norm": 0.487196683883667,
+ "learning_rate": 8.917192776895382e-05,
+ "loss": 0.8183,
+ "step": 1602
+ },
+ {
+ "epoch": 1.6034509705854771,
+ "grad_norm": 0.450591504573822,
+ "learning_rate": 8.906772548844026e-05,
+ "loss": 0.9506,
+ "step": 1603
+ },
+ {
+ "epoch": 1.604451251914601,
+ "grad_norm": 0.5414707064628601,
+ "learning_rate": 8.896353522051928e-05,
+ "loss": 1.2171,
+ "step": 1604
+ },
+ {
+ "epoch": 1.6054515332437247,
+ "grad_norm": 0.5198320746421814,
+ "learning_rate": 8.885935707967716e-05,
+ "loss": 0.8762,
+ "step": 1605
+ },
+ {
+ "epoch": 1.6064518145728486,
+ "grad_norm": 0.4546220302581787,
+ "learning_rate": 8.875519118038684e-05,
+ "loss": 0.9634,
+ "step": 1606
+ },
+ {
+ "epoch": 1.6074520959019725,
+ "grad_norm": 0.5151107907295227,
+ "learning_rate": 8.865103763710777e-05,
+ "loss": 1.1038,
+ "step": 1607
+ },
+ {
+ "epoch": 1.6084523772310964,
+ "grad_norm": 0.46089720726013184,
+ "learning_rate": 8.854689656428591e-05,
+ "loss": 0.8706,
+ "step": 1608
+ },
+ {
+ "epoch": 1.60945265856022,
+ "grad_norm": 0.4554317593574524,
+ "learning_rate": 8.844276807635343e-05,
+ "loss": 0.7553,
+ "step": 1609
+ },
+ {
+ "epoch": 1.610452939889344,
+ "grad_norm": 0.5166018009185791,
+ "learning_rate": 8.833865228772871e-05,
+ "loss": 0.8954,
+ "step": 1610
+ },
+ {
+ "epoch": 1.6114532212184676,
+ "grad_norm": 0.45595693588256836,
+ "learning_rate": 8.823454931281616e-05,
+ "loss": 0.9015,
+ "step": 1611
+ },
+ {
+ "epoch": 1.6124535025475915,
+ "grad_norm": 0.4563496708869934,
+ "learning_rate": 8.813045926600615e-05,
+ "loss": 0.9071,
+ "step": 1612
+ },
+ {
+ "epoch": 1.6134537838767153,
+ "grad_norm": 0.44123467803001404,
+ "learning_rate": 8.802638226167479e-05,
+ "loss": 0.8316,
+ "step": 1613
+ },
+ {
+ "epoch": 1.6144540652058392,
+ "grad_norm": 0.5304034352302551,
+ "learning_rate": 8.792231841418391e-05,
+ "loss": 0.9965,
+ "step": 1614
+ },
+ {
+ "epoch": 1.6154543465349631,
+ "grad_norm": 0.5578649044036865,
+ "learning_rate": 8.781826783788084e-05,
+ "loss": 0.9171,
+ "step": 1615
+ },
+ {
+ "epoch": 1.6164546278640868,
+ "grad_norm": 0.5331206917762756,
+ "learning_rate": 8.771423064709837e-05,
+ "loss": 0.8648,
+ "step": 1616
+ },
+ {
+ "epoch": 1.6174549091932104,
+ "grad_norm": 0.5196745991706848,
+ "learning_rate": 8.76102069561545e-05,
+ "loss": 0.9136,
+ "step": 1617
+ },
+ {
+ "epoch": 1.6184551905223343,
+ "grad_norm": 0.5278195142745972,
+ "learning_rate": 8.750619687935251e-05,
+ "loss": 0.9105,
+ "step": 1618
+ },
+ {
+ "epoch": 1.6194554718514582,
+ "grad_norm": 0.4967080056667328,
+ "learning_rate": 8.740220053098067e-05,
+ "loss": 0.8975,
+ "step": 1619
+ },
+ {
+ "epoch": 1.620455753180582,
+ "grad_norm": 0.5626882910728455,
+ "learning_rate": 8.729821802531212e-05,
+ "loss": 1.0178,
+ "step": 1620
+ },
+ {
+ "epoch": 1.621456034509706,
+ "grad_norm": 0.4372572898864746,
+ "learning_rate": 8.719424947660487e-05,
+ "loss": 0.8344,
+ "step": 1621
+ },
+ {
+ "epoch": 1.6224563158388297,
+ "grad_norm": 0.5572327971458435,
+ "learning_rate": 8.70902949991015e-05,
+ "loss": 0.9831,
+ "step": 1622
+ },
+ {
+ "epoch": 1.6234565971679535,
+ "grad_norm": 0.43764790892601013,
+ "learning_rate": 8.698635470702923e-05,
+ "loss": 0.8901,
+ "step": 1623
+ },
+ {
+ "epoch": 1.6244568784970772,
+ "grad_norm": 0.5335058569908142,
+ "learning_rate": 8.688242871459963e-05,
+ "loss": 0.8063,
+ "step": 1624
+ },
+ {
+ "epoch": 1.625457159826201,
+ "grad_norm": 0.5070383548736572,
+ "learning_rate": 8.677851713600855e-05,
+ "loss": 1.1381,
+ "step": 1625
+ },
+ {
+ "epoch": 1.626457441155325,
+ "grad_norm": 0.5117019414901733,
+ "learning_rate": 8.667462008543603e-05,
+ "loss": 1.1598,
+ "step": 1626
+ },
+ {
+ "epoch": 1.6274577224844489,
+ "grad_norm": 0.4911440908908844,
+ "learning_rate": 8.657073767704615e-05,
+ "loss": 0.9673,
+ "step": 1627
+ },
+ {
+ "epoch": 1.6284580038135725,
+ "grad_norm": 0.4799586832523346,
+ "learning_rate": 8.646687002498692e-05,
+ "loss": 0.8415,
+ "step": 1628
+ },
+ {
+ "epoch": 1.6294582851426964,
+ "grad_norm": 0.5615330934524536,
+ "learning_rate": 8.636301724339004e-05,
+ "loss": 0.9751,
+ "step": 1629
+ },
+ {
+ "epoch": 1.63045856647182,
+ "grad_norm": 0.45118963718414307,
+ "learning_rate": 8.625917944637096e-05,
+ "loss": 0.9169,
+ "step": 1630
+ },
+ {
+ "epoch": 1.631458847800944,
+ "grad_norm": 0.49533525109291077,
+ "learning_rate": 8.615535674802865e-05,
+ "loss": 0.9739,
+ "step": 1631
+ },
+ {
+ "epoch": 1.6324591291300679,
+ "grad_norm": 0.5451453328132629,
+ "learning_rate": 8.605154926244543e-05,
+ "loss": 0.777,
+ "step": 1632
+ },
+ {
+ "epoch": 1.6334594104591917,
+ "grad_norm": 0.6013240814208984,
+ "learning_rate": 8.594775710368704e-05,
+ "loss": 0.9289,
+ "step": 1633
+ },
+ {
+ "epoch": 1.6344596917883156,
+ "grad_norm": 0.5311821699142456,
+ "learning_rate": 8.584398038580226e-05,
+ "loss": 0.9737,
+ "step": 1634
+ },
+ {
+ "epoch": 1.6354599731174393,
+ "grad_norm": 0.4836428165435791,
+ "learning_rate": 8.574021922282292e-05,
+ "loss": 0.9495,
+ "step": 1635
+ },
+ {
+ "epoch": 1.636460254446563,
+ "grad_norm": 0.5316966772079468,
+ "learning_rate": 8.563647372876378e-05,
+ "loss": 0.8871,
+ "step": 1636
+ },
+ {
+ "epoch": 1.6374605357756868,
+ "grad_norm": 0.4969998896121979,
+ "learning_rate": 8.553274401762237e-05,
+ "loss": 0.8881,
+ "step": 1637
+ },
+ {
+ "epoch": 1.6384608171048107,
+ "grad_norm": 0.48786112666130066,
+ "learning_rate": 8.542903020337887e-05,
+ "loss": 0.8859,
+ "step": 1638
+ },
+ {
+ "epoch": 1.6394610984339346,
+ "grad_norm": 0.4753643572330475,
+ "learning_rate": 8.532533239999602e-05,
+ "loss": 0.759,
+ "step": 1639
+ },
+ {
+ "epoch": 1.6404613797630585,
+ "grad_norm": 0.4672154486179352,
+ "learning_rate": 8.522165072141897e-05,
+ "loss": 0.8429,
+ "step": 1640
+ },
+ {
+ "epoch": 1.6414616610921822,
+ "grad_norm": 0.47218796610832214,
+ "learning_rate": 8.511798528157512e-05,
+ "loss": 0.7702,
+ "step": 1641
+ },
+ {
+ "epoch": 1.6424619424213058,
+ "grad_norm": 0.4409984052181244,
+ "learning_rate": 8.501433619437403e-05,
+ "loss": 0.7803,
+ "step": 1642
+ },
+ {
+ "epoch": 1.6434622237504297,
+ "grad_norm": 0.539503812789917,
+ "learning_rate": 8.49107035737073e-05,
+ "loss": 0.9739,
+ "step": 1643
+ },
+ {
+ "epoch": 1.6444625050795536,
+ "grad_norm": 0.5032373666763306,
+ "learning_rate": 8.480708753344846e-05,
+ "loss": 1.0876,
+ "step": 1644
+ },
+ {
+ "epoch": 1.6454627864086775,
+ "grad_norm": 0.4480466842651367,
+ "learning_rate": 8.470348818745278e-05,
+ "loss": 0.9183,
+ "step": 1645
+ },
+ {
+ "epoch": 1.6464630677378014,
+ "grad_norm": 0.49911466240882874,
+ "learning_rate": 8.459990564955721e-05,
+ "loss": 0.8048,
+ "step": 1646
+ },
+ {
+ "epoch": 1.647463349066925,
+ "grad_norm": 0.48236754536628723,
+ "learning_rate": 8.449634003358022e-05,
+ "loss": 0.9785,
+ "step": 1647
+ },
+ {
+ "epoch": 1.648463630396049,
+ "grad_norm": 0.5161852240562439,
+ "learning_rate": 8.43927914533217e-05,
+ "loss": 0.9626,
+ "step": 1648
+ },
+ {
+ "epoch": 1.6494639117251726,
+ "grad_norm": 0.5653015971183777,
+ "learning_rate": 8.428926002256283e-05,
+ "loss": 1.0785,
+ "step": 1649
+ },
+ {
+ "epoch": 1.6504641930542965,
+ "grad_norm": 0.5340739488601685,
+ "learning_rate": 8.418574585506591e-05,
+ "loss": 1.0613,
+ "step": 1650
+ },
+ {
+ "epoch": 1.6514644743834204,
+ "grad_norm": 0.4651111960411072,
+ "learning_rate": 8.408224906457429e-05,
+ "loss": 0.8313,
+ "step": 1651
+ },
+ {
+ "epoch": 1.6524647557125443,
+ "grad_norm": 0.5264735221862793,
+ "learning_rate": 8.397876976481224e-05,
+ "loss": 0.8187,
+ "step": 1652
+ },
+ {
+ "epoch": 1.653465037041668,
+ "grad_norm": 0.4576081335544586,
+ "learning_rate": 8.387530806948476e-05,
+ "loss": 0.8758,
+ "step": 1653
+ },
+ {
+ "epoch": 1.6544653183707918,
+ "grad_norm": 0.4851805567741394,
+ "learning_rate": 8.37718640922776e-05,
+ "loss": 0.877,
+ "step": 1654
+ },
+ {
+ "epoch": 1.6554655996999155,
+ "grad_norm": 0.48545941710472107,
+ "learning_rate": 8.366843794685695e-05,
+ "loss": 0.8988,
+ "step": 1655
+ },
+ {
+ "epoch": 1.6564658810290394,
+ "grad_norm": 0.5381633639335632,
+ "learning_rate": 8.356502974686941e-05,
+ "loss": 0.8958,
+ "step": 1656
+ },
+ {
+ "epoch": 1.6574661623581632,
+ "grad_norm": 0.5239037275314331,
+ "learning_rate": 8.346163960594193e-05,
+ "loss": 0.9698,
+ "step": 1657
+ },
+ {
+ "epoch": 1.6584664436872871,
+ "grad_norm": 0.5378285050392151,
+ "learning_rate": 8.335826763768156e-05,
+ "loss": 0.8765,
+ "step": 1658
+ },
+ {
+ "epoch": 1.659466725016411,
+ "grad_norm": 0.45296210050582886,
+ "learning_rate": 8.325491395567541e-05,
+ "loss": 0.8048,
+ "step": 1659
+ },
+ {
+ "epoch": 1.6604670063455347,
+ "grad_norm": 0.4575178325176239,
+ "learning_rate": 8.315157867349046e-05,
+ "loss": 0.8388,
+ "step": 1660
+ },
+ {
+ "epoch": 1.6614672876746583,
+ "grad_norm": 0.4762253165245056,
+ "learning_rate": 8.30482619046735e-05,
+ "loss": 0.9123,
+ "step": 1661
+ },
+ {
+ "epoch": 1.6624675690037822,
+ "grad_norm": 0.46717318892478943,
+ "learning_rate": 8.294496376275104e-05,
+ "loss": 0.9213,
+ "step": 1662
+ },
+ {
+ "epoch": 1.6634678503329061,
+ "grad_norm": 0.4792725741863251,
+ "learning_rate": 8.284168436122898e-05,
+ "loss": 0.793,
+ "step": 1663
+ },
+ {
+ "epoch": 1.66446813166203,
+ "grad_norm": 0.4854644238948822,
+ "learning_rate": 8.273842381359273e-05,
+ "loss": 0.9657,
+ "step": 1664
+ },
+ {
+ "epoch": 1.665468412991154,
+ "grad_norm": 0.44722744822502136,
+ "learning_rate": 8.263518223330697e-05,
+ "loss": 0.8159,
+ "step": 1665
+ },
+ {
+ "epoch": 1.6664686943202776,
+ "grad_norm": 0.5070934891700745,
+ "learning_rate": 8.253195973381552e-05,
+ "loss": 0.8971,
+ "step": 1666
+ },
+ {
+ "epoch": 1.6674689756494014,
+ "grad_norm": 0.4743734300136566,
+ "learning_rate": 8.242875642854121e-05,
+ "loss": 0.8042,
+ "step": 1667
+ },
+ {
+ "epoch": 1.668469256978525,
+ "grad_norm": 0.5857224464416504,
+ "learning_rate": 8.232557243088585e-05,
+ "loss": 1.0666,
+ "step": 1668
+ },
+ {
+ "epoch": 1.669469538307649,
+ "grad_norm": 0.5257895588874817,
+ "learning_rate": 8.222240785422996e-05,
+ "loss": 0.9619,
+ "step": 1669
+ },
+ {
+ "epoch": 1.6704698196367729,
+ "grad_norm": 0.5153073668479919,
+ "learning_rate": 8.211926281193277e-05,
+ "loss": 0.9189,
+ "step": 1670
+ },
+ {
+ "epoch": 1.6714701009658968,
+ "grad_norm": 0.49723324179649353,
+ "learning_rate": 8.201613741733203e-05,
+ "loss": 1.037,
+ "step": 1671
+ },
+ {
+ "epoch": 1.6724703822950204,
+ "grad_norm": 0.5014336705207825,
+ "learning_rate": 8.191303178374389e-05,
+ "loss": 0.8598,
+ "step": 1672
+ },
+ {
+ "epoch": 1.6734706636241443,
+ "grad_norm": 0.5031597018241882,
+ "learning_rate": 8.180994602446279e-05,
+ "loss": 0.9622,
+ "step": 1673
+ },
+ {
+ "epoch": 1.674470944953268,
+ "grad_norm": 0.4872223436832428,
+ "learning_rate": 8.170688025276134e-05,
+ "loss": 0.7971,
+ "step": 1674
+ },
+ {
+ "epoch": 1.6754712262823919,
+ "grad_norm": 0.5090667605400085,
+ "learning_rate": 8.160383458189022e-05,
+ "loss": 0.9825,
+ "step": 1675
+ },
+ {
+ "epoch": 1.6764715076115158,
+ "grad_norm": 0.49642691016197205,
+ "learning_rate": 8.15008091250779e-05,
+ "loss": 0.9541,
+ "step": 1676
+ },
+ {
+ "epoch": 1.6774717889406396,
+ "grad_norm": 0.7710174322128296,
+ "learning_rate": 8.13978039955308e-05,
+ "loss": 0.9036,
+ "step": 1677
+ },
+ {
+ "epoch": 1.6784720702697635,
+ "grad_norm": 0.551180362701416,
+ "learning_rate": 8.12948193064329e-05,
+ "loss": 0.931,
+ "step": 1678
+ },
+ {
+ "epoch": 1.6794723515988872,
+ "grad_norm": 0.540558934211731,
+ "learning_rate": 8.119185517094578e-05,
+ "loss": 0.8364,
+ "step": 1679
+ },
+ {
+ "epoch": 1.6804726329280109,
+ "grad_norm": 0.47380101680755615,
+ "learning_rate": 8.108891170220836e-05,
+ "loss": 0.8494,
+ "step": 1680
+ },
+ {
+ "epoch": 1.6814729142571347,
+ "grad_norm": 0.4427139461040497,
+ "learning_rate": 8.098598901333692e-05,
+ "loss": 0.8441,
+ "step": 1681
+ },
+ {
+ "epoch": 1.6824731955862586,
+ "grad_norm": 0.5092798471450806,
+ "learning_rate": 8.088308721742491e-05,
+ "loss": 0.9069,
+ "step": 1682
+ },
+ {
+ "epoch": 1.6834734769153825,
+ "grad_norm": 0.4453091621398926,
+ "learning_rate": 8.078020642754274e-05,
+ "loss": 0.8539,
+ "step": 1683
+ },
+ {
+ "epoch": 1.6844737582445064,
+ "grad_norm": 0.5102719068527222,
+ "learning_rate": 8.06773467567378e-05,
+ "loss": 0.808,
+ "step": 1684
+ },
+ {
+ "epoch": 1.68547403957363,
+ "grad_norm": 0.44998160004615784,
+ "learning_rate": 8.057450831803428e-05,
+ "loss": 0.9399,
+ "step": 1685
+ },
+ {
+ "epoch": 1.686474320902754,
+ "grad_norm": 0.47718214988708496,
+ "learning_rate": 8.047169122443302e-05,
+ "loss": 0.8851,
+ "step": 1686
+ },
+ {
+ "epoch": 1.6874746022318776,
+ "grad_norm": 0.5858275890350342,
+ "learning_rate": 8.036889558891142e-05,
+ "loss": 1.0813,
+ "step": 1687
+ },
+ {
+ "epoch": 1.6884748835610015,
+ "grad_norm": 0.6066718101501465,
+ "learning_rate": 8.026612152442329e-05,
+ "loss": 0.985,
+ "step": 1688
+ },
+ {
+ "epoch": 1.6894751648901254,
+ "grad_norm": 0.529468834400177,
+ "learning_rate": 8.016336914389874e-05,
+ "loss": 1.0599,
+ "step": 1689
+ },
+ {
+ "epoch": 1.6904754462192493,
+ "grad_norm": 0.5604698061943054,
+ "learning_rate": 8.006063856024405e-05,
+ "loss": 0.8511,
+ "step": 1690
+ },
+ {
+ "epoch": 1.691475727548373,
+ "grad_norm": 0.5078622102737427,
+ "learning_rate": 7.995792988634152e-05,
+ "loss": 0.8286,
+ "step": 1691
+ },
+ {
+ "epoch": 1.6924760088774968,
+ "grad_norm": 0.5138706564903259,
+ "learning_rate": 7.985524323504948e-05,
+ "loss": 0.9054,
+ "step": 1692
+ },
+ {
+ "epoch": 1.6934762902066205,
+ "grad_norm": 0.42073604464530945,
+ "learning_rate": 7.975257871920195e-05,
+ "loss": 0.8403,
+ "step": 1693
+ },
+ {
+ "epoch": 1.6944765715357444,
+ "grad_norm": 0.5249999761581421,
+ "learning_rate": 7.964993645160866e-05,
+ "loss": 0.8382,
+ "step": 1694
+ },
+ {
+ "epoch": 1.6954768528648683,
+ "grad_norm": 0.4233437478542328,
+ "learning_rate": 7.954731654505491e-05,
+ "loss": 0.7757,
+ "step": 1695
+ },
+ {
+ "epoch": 1.6964771341939922,
+ "grad_norm": 0.5192474722862244,
+ "learning_rate": 7.944471911230142e-05,
+ "loss": 0.9689,
+ "step": 1696
+ },
+ {
+ "epoch": 1.697477415523116,
+ "grad_norm": 0.5599137544631958,
+ "learning_rate": 7.93421442660842e-05,
+ "loss": 1.1277,
+ "step": 1697
+ },
+ {
+ "epoch": 1.6984776968522397,
+ "grad_norm": 0.4425784647464752,
+ "learning_rate": 7.923959211911449e-05,
+ "loss": 0.8822,
+ "step": 1698
+ },
+ {
+ "epoch": 1.6994779781813634,
+ "grad_norm": 0.48276057839393616,
+ "learning_rate": 7.91370627840785e-05,
+ "loss": 1.0073,
+ "step": 1699
+ },
+ {
+ "epoch": 1.7004782595104873,
+ "grad_norm": 0.5134496688842773,
+ "learning_rate": 7.903455637363746e-05,
+ "loss": 0.8437,
+ "step": 1700
+ },
+ {
+ "epoch": 1.7014785408396111,
+ "grad_norm": 0.49254342913627625,
+ "learning_rate": 7.89320730004274e-05,
+ "loss": 0.9512,
+ "step": 1701
+ },
+ {
+ "epoch": 1.702478822168735,
+ "grad_norm": 0.4442595839500427,
+ "learning_rate": 7.882961277705895e-05,
+ "loss": 0.8391,
+ "step": 1702
+ },
+ {
+ "epoch": 1.703479103497859,
+ "grad_norm": 0.5177878141403198,
+ "learning_rate": 7.872717581611741e-05,
+ "loss": 0.9012,
+ "step": 1703
+ },
+ {
+ "epoch": 1.7044793848269826,
+ "grad_norm": 0.4612918496131897,
+ "learning_rate": 7.862476223016246e-05,
+ "loss": 0.86,
+ "step": 1704
+ },
+ {
+ "epoch": 1.7054796661561062,
+ "grad_norm": 0.47172513604164124,
+ "learning_rate": 7.852237213172812e-05,
+ "loss": 0.8821,
+ "step": 1705
+ },
+ {
+ "epoch": 1.7064799474852301,
+ "grad_norm": 0.5113676190376282,
+ "learning_rate": 7.842000563332254e-05,
+ "loss": 0.8243,
+ "step": 1706
+ },
+ {
+ "epoch": 1.707480228814354,
+ "grad_norm": 0.5000366568565369,
+ "learning_rate": 7.831766284742807e-05,
+ "loss": 0.9887,
+ "step": 1707
+ },
+ {
+ "epoch": 1.708480510143478,
+ "grad_norm": 0.5838572978973389,
+ "learning_rate": 7.82153438865009e-05,
+ "loss": 0.9401,
+ "step": 1708
+ },
+ {
+ "epoch": 1.7094807914726018,
+ "grad_norm": 0.5229962468147278,
+ "learning_rate": 7.811304886297104e-05,
+ "loss": 1.0353,
+ "step": 1709
+ },
+ {
+ "epoch": 1.7104810728017255,
+ "grad_norm": 0.45854273438453674,
+ "learning_rate": 7.801077788924224e-05,
+ "loss": 0.8868,
+ "step": 1710
+ },
+ {
+ "epoch": 1.7114813541308493,
+ "grad_norm": 0.5133983492851257,
+ "learning_rate": 7.790853107769179e-05,
+ "loss": 0.9689,
+ "step": 1711
+ },
+ {
+ "epoch": 1.712481635459973,
+ "grad_norm": 0.5269356369972229,
+ "learning_rate": 7.780630854067045e-05,
+ "loss": 0.8751,
+ "step": 1712
+ },
+ {
+ "epoch": 1.713481916789097,
+ "grad_norm": 0.523595929145813,
+ "learning_rate": 7.77041103905023e-05,
+ "loss": 0.9806,
+ "step": 1713
+ },
+ {
+ "epoch": 1.7144821981182208,
+ "grad_norm": 0.6217412352561951,
+ "learning_rate": 7.760193673948461e-05,
+ "loss": 0.8298,
+ "step": 1714
+ },
+ {
+ "epoch": 1.7154824794473447,
+ "grad_norm": 0.47979483008384705,
+ "learning_rate": 7.749978769988778e-05,
+ "loss": 0.8578,
+ "step": 1715
+ },
+ {
+ "epoch": 1.7164827607764683,
+ "grad_norm": 0.4971829652786255,
+ "learning_rate": 7.739766338395511e-05,
+ "loss": 0.9794,
+ "step": 1716
+ },
+ {
+ "epoch": 1.7174830421055922,
+ "grad_norm": 0.5164886116981506,
+ "learning_rate": 7.729556390390275e-05,
+ "loss": 0.9267,
+ "step": 1717
+ },
+ {
+ "epoch": 1.7184833234347159,
+ "grad_norm": 0.5067420601844788,
+ "learning_rate": 7.719348937191957e-05,
+ "loss": 0.951,
+ "step": 1718
+ },
+ {
+ "epoch": 1.7194836047638398,
+ "grad_norm": 0.5390254259109497,
+ "learning_rate": 7.709143990016702e-05,
+ "loss": 0.8409,
+ "step": 1719
+ },
+ {
+ "epoch": 1.7204838860929637,
+ "grad_norm": 0.4631121754646301,
+ "learning_rate": 7.698941560077899e-05,
+ "loss": 0.704,
+ "step": 1720
+ },
+ {
+ "epoch": 1.7214841674220875,
+ "grad_norm": 0.5231932997703552,
+ "learning_rate": 7.688741658586178e-05,
+ "loss": 1.0912,
+ "step": 1721
+ },
+ {
+ "epoch": 1.7224844487512114,
+ "grad_norm": 0.4563293755054474,
+ "learning_rate": 7.678544296749384e-05,
+ "loss": 0.8444,
+ "step": 1722
+ },
+ {
+ "epoch": 1.723484730080335,
+ "grad_norm": 0.4844750463962555,
+ "learning_rate": 7.668349485772572e-05,
+ "loss": 0.9234,
+ "step": 1723
+ },
+ {
+ "epoch": 1.7244850114094588,
+ "grad_norm": 0.45698872208595276,
+ "learning_rate": 7.658157236857999e-05,
+ "loss": 0.8608,
+ "step": 1724
+ },
+ {
+ "epoch": 1.7254852927385826,
+ "grad_norm": 0.46694663166999817,
+ "learning_rate": 7.6479675612051e-05,
+ "loss": 0.9628,
+ "step": 1725
+ },
+ {
+ "epoch": 1.7264855740677065,
+ "grad_norm": 0.46077099442481995,
+ "learning_rate": 7.637780470010487e-05,
+ "loss": 0.8173,
+ "step": 1726
+ },
+ {
+ "epoch": 1.7274858553968304,
+ "grad_norm": 0.5198522210121155,
+ "learning_rate": 7.62759597446793e-05,
+ "loss": 0.8813,
+ "step": 1727
+ },
+ {
+ "epoch": 1.7284861367259543,
+ "grad_norm": 0.48385483026504517,
+ "learning_rate": 7.617414085768351e-05,
+ "loss": 0.7007,
+ "step": 1728
+ },
+ {
+ "epoch": 1.729486418055078,
+ "grad_norm": 0.5622795224189758,
+ "learning_rate": 7.607234815099802e-05,
+ "loss": 1.0422,
+ "step": 1729
+ },
+ {
+ "epoch": 1.7304866993842019,
+ "grad_norm": 0.5077874660491943,
+ "learning_rate": 7.597058173647458e-05,
+ "loss": 1.014,
+ "step": 1730
+ },
+ {
+ "epoch": 1.7314869807133255,
+ "grad_norm": 0.598760724067688,
+ "learning_rate": 7.586884172593609e-05,
+ "loss": 0.8979,
+ "step": 1731
+ },
+ {
+ "epoch": 1.7324872620424494,
+ "grad_norm": 0.6116266846656799,
+ "learning_rate": 7.576712823117645e-05,
+ "loss": 0.9121,
+ "step": 1732
+ },
+ {
+ "epoch": 1.7334875433715733,
+ "grad_norm": 0.6157407164573669,
+ "learning_rate": 7.566544136396037e-05,
+ "loss": 0.9361,
+ "step": 1733
+ },
+ {
+ "epoch": 1.7344878247006972,
+ "grad_norm": 0.5174565315246582,
+ "learning_rate": 7.556378123602334e-05,
+ "loss": 1.1858,
+ "step": 1734
+ },
+ {
+ "epoch": 1.7354881060298208,
+ "grad_norm": 0.42541515827178955,
+ "learning_rate": 7.54621479590714e-05,
+ "loss": 0.7425,
+ "step": 1735
+ },
+ {
+ "epoch": 1.7364883873589447,
+ "grad_norm": 0.49402132630348206,
+ "learning_rate": 7.536054164478123e-05,
+ "loss": 0.8158,
+ "step": 1736
+ },
+ {
+ "epoch": 1.7374886686880684,
+ "grad_norm": 0.4637628197669983,
+ "learning_rate": 7.525896240479976e-05,
+ "loss": 0.7859,
+ "step": 1737
+ },
+ {
+ "epoch": 1.7384889500171923,
+ "grad_norm": 0.5475689172744751,
+ "learning_rate": 7.51574103507442e-05,
+ "loss": 0.825,
+ "step": 1738
+ },
+ {
+ "epoch": 1.7394892313463162,
+ "grad_norm": 0.5652226209640503,
+ "learning_rate": 7.505588559420189e-05,
+ "loss": 0.9051,
+ "step": 1739
+ },
+ {
+ "epoch": 1.74048951267544,
+ "grad_norm": 0.4930717647075653,
+ "learning_rate": 7.495438824673016e-05,
+ "loss": 0.7797,
+ "step": 1740
+ },
+ {
+ "epoch": 1.741489794004564,
+ "grad_norm": 0.4611824154853821,
+ "learning_rate": 7.485291841985626e-05,
+ "loss": 1.014,
+ "step": 1741
+ },
+ {
+ "epoch": 1.7424900753336876,
+ "grad_norm": 0.4652807414531708,
+ "learning_rate": 7.475147622507717e-05,
+ "loss": 0.7601,
+ "step": 1742
+ },
+ {
+ "epoch": 1.7434903566628113,
+ "grad_norm": 0.5227355360984802,
+ "learning_rate": 7.465006177385953e-05,
+ "loss": 0.8616,
+ "step": 1743
+ },
+ {
+ "epoch": 1.7444906379919352,
+ "grad_norm": 0.42283377051353455,
+ "learning_rate": 7.454867517763948e-05,
+ "loss": 0.8647,
+ "step": 1744
+ },
+ {
+ "epoch": 1.745490919321059,
+ "grad_norm": 0.45151621103286743,
+ "learning_rate": 7.444731654782253e-05,
+ "loss": 0.8619,
+ "step": 1745
+ },
+ {
+ "epoch": 1.746491200650183,
+ "grad_norm": 0.6146779656410217,
+ "learning_rate": 7.434598599578351e-05,
+ "loss": 0.9479,
+ "step": 1746
+ },
+ {
+ "epoch": 1.7474914819793068,
+ "grad_norm": 0.4988139271736145,
+ "learning_rate": 7.424468363286634e-05,
+ "loss": 0.9136,
+ "step": 1747
+ },
+ {
+ "epoch": 1.7484917633084305,
+ "grad_norm": 0.5271700024604797,
+ "learning_rate": 7.414340957038406e-05,
+ "loss": 1.0416,
+ "step": 1748
+ },
+ {
+ "epoch": 1.7494920446375544,
+ "grad_norm": 0.46806615591049194,
+ "learning_rate": 7.404216391961847e-05,
+ "loss": 0.8376,
+ "step": 1749
+ },
+ {
+ "epoch": 1.750492325966678,
+ "grad_norm": 0.4781439006328583,
+ "learning_rate": 7.394094679182024e-05,
+ "loss": 0.9669,
+ "step": 1750
+ },
+ {
+ "epoch": 1.751492607295802,
+ "grad_norm": 0.49085667729377747,
+ "learning_rate": 7.383975829820874e-05,
+ "loss": 0.9279,
+ "step": 1751
+ },
+ {
+ "epoch": 1.7524928886249258,
+ "grad_norm": 0.4937964379787445,
+ "learning_rate": 7.37385985499718e-05,
+ "loss": 1.1126,
+ "step": 1752
+ },
+ {
+ "epoch": 1.7534931699540497,
+ "grad_norm": 0.3883766233921051,
+ "learning_rate": 7.36374676582657e-05,
+ "loss": 0.7398,
+ "step": 1753
+ },
+ {
+ "epoch": 1.7544934512831734,
+ "grad_norm": 0.4864053726196289,
+ "learning_rate": 7.353636573421496e-05,
+ "loss": 0.8172,
+ "step": 1754
+ },
+ {
+ "epoch": 1.7554937326122972,
+ "grad_norm": 0.48342639207839966,
+ "learning_rate": 7.343529288891239e-05,
+ "loss": 0.8957,
+ "step": 1755
+ },
+ {
+ "epoch": 1.756494013941421,
+ "grad_norm": 0.47928398847579956,
+ "learning_rate": 7.333424923341868e-05,
+ "loss": 0.8414,
+ "step": 1756
+ },
+ {
+ "epoch": 1.7574942952705448,
+ "grad_norm": 0.46736687421798706,
+ "learning_rate": 7.323323487876257e-05,
+ "loss": 0.7661,
+ "step": 1757
+ },
+ {
+ "epoch": 1.7584945765996687,
+ "grad_norm": 0.5184097290039062,
+ "learning_rate": 7.313224993594057e-05,
+ "loss": 0.8719,
+ "step": 1758
+ },
+ {
+ "epoch": 1.7594948579287926,
+ "grad_norm": 0.526541531085968,
+ "learning_rate": 7.303129451591686e-05,
+ "loss": 0.8801,
+ "step": 1759
+ },
+ {
+ "epoch": 1.7604951392579165,
+ "grad_norm": 0.5191768407821655,
+ "learning_rate": 7.29303687296232e-05,
+ "loss": 0.9343,
+ "step": 1760
+ },
+ {
+ "epoch": 1.7614954205870401,
+ "grad_norm": 0.5041552186012268,
+ "learning_rate": 7.282947268795877e-05,
+ "loss": 0.9369,
+ "step": 1761
+ },
+ {
+ "epoch": 1.7624957019161638,
+ "grad_norm": 0.4530990719795227,
+ "learning_rate": 7.272860650179006e-05,
+ "loss": 0.9629,
+ "step": 1762
+ },
+ {
+ "epoch": 1.7634959832452877,
+ "grad_norm": 0.42898643016815186,
+ "learning_rate": 7.262777028195081e-05,
+ "loss": 0.7658,
+ "step": 1763
+ },
+ {
+ "epoch": 1.7644962645744116,
+ "grad_norm": 0.4350574314594269,
+ "learning_rate": 7.252696413924174e-05,
+ "loss": 0.7273,
+ "step": 1764
+ },
+ {
+ "epoch": 1.7654965459035354,
+ "grad_norm": 0.517660915851593,
+ "learning_rate": 7.242618818443056e-05,
+ "loss": 0.9021,
+ "step": 1765
+ },
+ {
+ "epoch": 1.7664968272326593,
+ "grad_norm": 0.5530719757080078,
+ "learning_rate": 7.232544252825189e-05,
+ "loss": 0.8532,
+ "step": 1766
+ },
+ {
+ "epoch": 1.767497108561783,
+ "grad_norm": 0.41731134057044983,
+ "learning_rate": 7.222472728140695e-05,
+ "loss": 0.6834,
+ "step": 1767
+ },
+ {
+ "epoch": 1.7684973898909067,
+ "grad_norm": 0.4782492518424988,
+ "learning_rate": 7.212404255456357e-05,
+ "loss": 0.8692,
+ "step": 1768
+ },
+ {
+ "epoch": 1.7694976712200305,
+ "grad_norm": 0.5327005386352539,
+ "learning_rate": 7.202338845835606e-05,
+ "loss": 0.92,
+ "step": 1769
+ },
+ {
+ "epoch": 1.7704979525491544,
+ "grad_norm": 0.48882028460502625,
+ "learning_rate": 7.192276510338507e-05,
+ "loss": 0.8545,
+ "step": 1770
+ },
+ {
+ "epoch": 1.7714982338782783,
+ "grad_norm": 0.5156509280204773,
+ "learning_rate": 7.182217260021749e-05,
+ "loss": 0.9533,
+ "step": 1771
+ },
+ {
+ "epoch": 1.7724985152074022,
+ "grad_norm": 0.49955782294273376,
+ "learning_rate": 7.172161105938624e-05,
+ "loss": 0.7701,
+ "step": 1772
+ },
+ {
+ "epoch": 1.7734987965365259,
+ "grad_norm": 0.4707096219062805,
+ "learning_rate": 7.162108059139032e-05,
+ "loss": 0.9093,
+ "step": 1773
+ },
+ {
+ "epoch": 1.7744990778656498,
+ "grad_norm": 0.5026343464851379,
+ "learning_rate": 7.15205813066945e-05,
+ "loss": 1.0551,
+ "step": 1774
+ },
+ {
+ "epoch": 1.7754993591947734,
+ "grad_norm": 0.4696865975856781,
+ "learning_rate": 7.142011331572936e-05,
+ "loss": 0.8701,
+ "step": 1775
+ },
+ {
+ "epoch": 1.7764996405238973,
+ "grad_norm": 0.4939334988594055,
+ "learning_rate": 7.131967672889101e-05,
+ "loss": 0.9638,
+ "step": 1776
+ },
+ {
+ "epoch": 1.7774999218530212,
+ "grad_norm": 0.4661426842212677,
+ "learning_rate": 7.121927165654109e-05,
+ "loss": 0.8687,
+ "step": 1777
+ },
+ {
+ "epoch": 1.778500203182145,
+ "grad_norm": 0.48258379101753235,
+ "learning_rate": 7.111889820900664e-05,
+ "loss": 0.9335,
+ "step": 1778
+ },
+ {
+ "epoch": 1.7795004845112687,
+ "grad_norm": 0.4910578727722168,
+ "learning_rate": 7.101855649657991e-05,
+ "loss": 0.9632,
+ "step": 1779
+ },
+ {
+ "epoch": 1.7805007658403926,
+ "grad_norm": 0.46052396297454834,
+ "learning_rate": 7.091824662951827e-05,
+ "loss": 0.7958,
+ "step": 1780
+ },
+ {
+ "epoch": 1.7815010471695163,
+ "grad_norm": 0.4766314625740051,
+ "learning_rate": 7.08179687180442e-05,
+ "loss": 0.7427,
+ "step": 1781
+ },
+ {
+ "epoch": 1.7825013284986402,
+ "grad_norm": 0.4556989371776581,
+ "learning_rate": 7.071772287234497e-05,
+ "loss": 0.7899,
+ "step": 1782
+ },
+ {
+ "epoch": 1.783501609827764,
+ "grad_norm": 0.5186169743537903,
+ "learning_rate": 7.06175092025726e-05,
+ "loss": 0.9758,
+ "step": 1783
+ },
+ {
+ "epoch": 1.784501891156888,
+ "grad_norm": 0.5379285216331482,
+ "learning_rate": 7.051732781884378e-05,
+ "loss": 0.8966,
+ "step": 1784
+ },
+ {
+ "epoch": 1.7855021724860118,
+ "grad_norm": 0.520286500453949,
+ "learning_rate": 7.041717883123977e-05,
+ "loss": 0.9421,
+ "step": 1785
+ },
+ {
+ "epoch": 1.7865024538151355,
+ "grad_norm": 0.5489597916603088,
+ "learning_rate": 7.031706234980617e-05,
+ "loss": 0.936,
+ "step": 1786
+ },
+ {
+ "epoch": 1.7875027351442592,
+ "grad_norm": 0.5182730555534363,
+ "learning_rate": 7.021697848455291e-05,
+ "loss": 0.953,
+ "step": 1787
+ },
+ {
+ "epoch": 1.788503016473383,
+ "grad_norm": 0.5181865692138672,
+ "learning_rate": 7.011692734545403e-05,
+ "loss": 0.8688,
+ "step": 1788
+ },
+ {
+ "epoch": 1.789503297802507,
+ "grad_norm": 0.44486725330352783,
+ "learning_rate": 7.001690904244767e-05,
+ "loss": 0.8014,
+ "step": 1789
+ },
+ {
+ "epoch": 1.7905035791316308,
+ "grad_norm": 0.5337903499603271,
+ "learning_rate": 6.991692368543584e-05,
+ "loss": 0.9003,
+ "step": 1790
+ },
+ {
+ "epoch": 1.7915038604607547,
+ "grad_norm": 0.5147045254707336,
+ "learning_rate": 6.981697138428434e-05,
+ "loss": 0.9162,
+ "step": 1791
+ },
+ {
+ "epoch": 1.7925041417898784,
+ "grad_norm": 0.5204777121543884,
+ "learning_rate": 6.971705224882271e-05,
+ "loss": 0.8938,
+ "step": 1792
+ },
+ {
+ "epoch": 1.7935044231190023,
+ "grad_norm": 0.45608311891555786,
+ "learning_rate": 6.9617166388844e-05,
+ "loss": 0.7545,
+ "step": 1793
+ },
+ {
+ "epoch": 1.794504704448126,
+ "grad_norm": 0.47650712728500366,
+ "learning_rate": 6.951731391410468e-05,
+ "loss": 0.8237,
+ "step": 1794
+ },
+ {
+ "epoch": 1.7955049857772498,
+ "grad_norm": 0.5793735384941101,
+ "learning_rate": 6.94174949343246e-05,
+ "loss": 1.1272,
+ "step": 1795
+ },
+ {
+ "epoch": 1.7965052671063737,
+ "grad_norm": 0.4923813045024872,
+ "learning_rate": 6.931770955918674e-05,
+ "loss": 1.0535,
+ "step": 1796
+ },
+ {
+ "epoch": 1.7975055484354976,
+ "grad_norm": 0.515476405620575,
+ "learning_rate": 6.921795789833723e-05,
+ "loss": 0.986,
+ "step": 1797
+ },
+ {
+ "epoch": 1.7985058297646213,
+ "grad_norm": 0.4697955250740051,
+ "learning_rate": 6.911824006138503e-05,
+ "loss": 0.8236,
+ "step": 1798
+ },
+ {
+ "epoch": 1.7995061110937451,
+ "grad_norm": 0.48255470395088196,
+ "learning_rate": 6.901855615790206e-05,
+ "loss": 0.8308,
+ "step": 1799
+ },
+ {
+ "epoch": 1.8005063924228688,
+ "grad_norm": 0.5010727047920227,
+ "learning_rate": 6.891890629742288e-05,
+ "loss": 0.9338,
+ "step": 1800
+ },
+ {
+ "epoch": 1.8015066737519927,
+ "grad_norm": 0.5230937004089355,
+ "learning_rate": 6.88192905894447e-05,
+ "loss": 0.7253,
+ "step": 1801
+ },
+ {
+ "epoch": 1.8025069550811166,
+ "grad_norm": 0.4493248164653778,
+ "learning_rate": 6.871970914342712e-05,
+ "loss": 0.9527,
+ "step": 1802
+ },
+ {
+ "epoch": 1.8035072364102405,
+ "grad_norm": 0.4727008044719696,
+ "learning_rate": 6.862016206879216e-05,
+ "loss": 0.9527,
+ "step": 1803
+ },
+ {
+ "epoch": 1.8045075177393644,
+ "grad_norm": 0.457698255777359,
+ "learning_rate": 6.852064947492405e-05,
+ "loss": 0.8424,
+ "step": 1804
+ },
+ {
+ "epoch": 1.805507799068488,
+ "grad_norm": 0.6156003475189209,
+ "learning_rate": 6.842117147116913e-05,
+ "loss": 0.9988,
+ "step": 1805
+ },
+ {
+ "epoch": 1.8065080803976117,
+ "grad_norm": 0.5174852609634399,
+ "learning_rate": 6.832172816683575e-05,
+ "loss": 0.8635,
+ "step": 1806
+ },
+ {
+ "epoch": 1.8075083617267356,
+ "grad_norm": 0.5165886878967285,
+ "learning_rate": 6.82223196711941e-05,
+ "loss": 0.8721,
+ "step": 1807
+ },
+ {
+ "epoch": 1.8085086430558595,
+ "grad_norm": 0.4866868853569031,
+ "learning_rate": 6.812294609347615e-05,
+ "loss": 0.8819,
+ "step": 1808
+ },
+ {
+ "epoch": 1.8095089243849833,
+ "grad_norm": 0.4991300404071808,
+ "learning_rate": 6.802360754287547e-05,
+ "loss": 0.8644,
+ "step": 1809
+ },
+ {
+ "epoch": 1.8105092057141072,
+ "grad_norm": 0.501853346824646,
+ "learning_rate": 6.79243041285472e-05,
+ "loss": 0.824,
+ "step": 1810
+ },
+ {
+ "epoch": 1.811509487043231,
+ "grad_norm": 0.5272979140281677,
+ "learning_rate": 6.782503595960782e-05,
+ "loss": 1.0178,
+ "step": 1811
+ },
+ {
+ "epoch": 1.8125097683723548,
+ "grad_norm": 0.5986105799674988,
+ "learning_rate": 6.772580314513508e-05,
+ "loss": 0.949,
+ "step": 1812
+ },
+ {
+ "epoch": 1.8135100497014784,
+ "grad_norm": 0.5391054153442383,
+ "learning_rate": 6.762660579416791e-05,
+ "loss": 1.0698,
+ "step": 1813
+ },
+ {
+ "epoch": 1.8145103310306023,
+ "grad_norm": 0.48486262559890747,
+ "learning_rate": 6.752744401570625e-05,
+ "loss": 0.9986,
+ "step": 1814
+ },
+ {
+ "epoch": 1.8155106123597262,
+ "grad_norm": 0.5090842843055725,
+ "learning_rate": 6.742831791871096e-05,
+ "loss": 0.8459,
+ "step": 1815
+ },
+ {
+ "epoch": 1.81651089368885,
+ "grad_norm": 0.408403605222702,
+ "learning_rate": 6.732922761210369e-05,
+ "loss": 0.7093,
+ "step": 1816
+ },
+ {
+ "epoch": 1.8175111750179738,
+ "grad_norm": 0.5082786083221436,
+ "learning_rate": 6.723017320476679e-05,
+ "loss": 0.8289,
+ "step": 1817
+ },
+ {
+ "epoch": 1.8185114563470977,
+ "grad_norm": 0.4834018647670746,
+ "learning_rate": 6.713115480554313e-05,
+ "loss": 0.9766,
+ "step": 1818
+ },
+ {
+ "epoch": 1.8195117376762213,
+ "grad_norm": 0.5373227596282959,
+ "learning_rate": 6.7032172523236e-05,
+ "loss": 1.0396,
+ "step": 1819
+ },
+ {
+ "epoch": 1.8205120190053452,
+ "grad_norm": 0.49561604857444763,
+ "learning_rate": 6.693322646660906e-05,
+ "loss": 0.9774,
+ "step": 1820
+ },
+ {
+ "epoch": 1.821512300334469,
+ "grad_norm": 0.47309985756874084,
+ "learning_rate": 6.683431674438612e-05,
+ "loss": 0.8266,
+ "step": 1821
+ },
+ {
+ "epoch": 1.822512581663593,
+ "grad_norm": 0.5706244707107544,
+ "learning_rate": 6.673544346525107e-05,
+ "loss": 1.027,
+ "step": 1822
+ },
+ {
+ "epoch": 1.8235128629927169,
+ "grad_norm": 0.5383077263832092,
+ "learning_rate": 6.663660673784777e-05,
+ "loss": 1.0545,
+ "step": 1823
+ },
+ {
+ "epoch": 1.8245131443218405,
+ "grad_norm": 0.5760438442230225,
+ "learning_rate": 6.653780667077985e-05,
+ "loss": 0.8955,
+ "step": 1824
+ },
+ {
+ "epoch": 1.8255134256509642,
+ "grad_norm": 0.45533323287963867,
+ "learning_rate": 6.643904337261082e-05,
+ "loss": 0.9149,
+ "step": 1825
+ },
+ {
+ "epoch": 1.826513706980088,
+ "grad_norm": 0.43935853242874146,
+ "learning_rate": 6.634031695186362e-05,
+ "loss": 0.8231,
+ "step": 1826
+ },
+ {
+ "epoch": 1.827513988309212,
+ "grad_norm": 0.4752298593521118,
+ "learning_rate": 6.624162751702076e-05,
+ "loss": 0.7823,
+ "step": 1827
+ },
+ {
+ "epoch": 1.8285142696383359,
+ "grad_norm": 0.5012879371643066,
+ "learning_rate": 6.614297517652409e-05,
+ "loss": 0.9586,
+ "step": 1828
+ },
+ {
+ "epoch": 1.8295145509674597,
+ "grad_norm": 0.4421415328979492,
+ "learning_rate": 6.604436003877464e-05,
+ "loss": 0.7812,
+ "step": 1829
+ },
+ {
+ "epoch": 1.8305148322965834,
+ "grad_norm": 0.5347071290016174,
+ "learning_rate": 6.594578221213265e-05,
+ "loss": 0.8906,
+ "step": 1830
+ },
+ {
+ "epoch": 1.831515113625707,
+ "grad_norm": 0.5177352428436279,
+ "learning_rate": 6.58472418049173e-05,
+ "loss": 1.0046,
+ "step": 1831
+ },
+ {
+ "epoch": 1.832515394954831,
+ "grad_norm": 0.5403003096580505,
+ "learning_rate": 6.574873892540671e-05,
+ "loss": 0.9588,
+ "step": 1832
+ },
+ {
+ "epoch": 1.8335156762839548,
+ "grad_norm": 0.5138882994651794,
+ "learning_rate": 6.565027368183769e-05,
+ "loss": 0.9824,
+ "step": 1833
+ },
+ {
+ "epoch": 1.8345159576130787,
+ "grad_norm": 0.4976009726524353,
+ "learning_rate": 6.555184618240577e-05,
+ "loss": 0.9454,
+ "step": 1834
+ },
+ {
+ "epoch": 1.8355162389422026,
+ "grad_norm": 0.5282961130142212,
+ "learning_rate": 6.545345653526495e-05,
+ "loss": 1.0134,
+ "step": 1835
+ },
+ {
+ "epoch": 1.8365165202713263,
+ "grad_norm": 0.47592097520828247,
+ "learning_rate": 6.535510484852767e-05,
+ "loss": 0.9658,
+ "step": 1836
+ },
+ {
+ "epoch": 1.8375168016004502,
+ "grad_norm": 0.5012205839157104,
+ "learning_rate": 6.525679123026463e-05,
+ "loss": 0.8937,
+ "step": 1837
+ },
+ {
+ "epoch": 1.8385170829295738,
+ "grad_norm": 0.47777363657951355,
+ "learning_rate": 6.515851578850474e-05,
+ "loss": 0.9862,
+ "step": 1838
+ },
+ {
+ "epoch": 1.8395173642586977,
+ "grad_norm": 0.4610724449157715,
+ "learning_rate": 6.506027863123492e-05,
+ "loss": 0.9208,
+ "step": 1839
+ },
+ {
+ "epoch": 1.8405176455878216,
+ "grad_norm": 0.5747025609016418,
+ "learning_rate": 6.496207986640004e-05,
+ "loss": 0.9366,
+ "step": 1840
+ },
+ {
+ "epoch": 1.8415179269169455,
+ "grad_norm": 0.48486635088920593,
+ "learning_rate": 6.48639196019028e-05,
+ "loss": 0.7989,
+ "step": 1841
+ },
+ {
+ "epoch": 1.8425182082460692,
+ "grad_norm": 0.4930958151817322,
+ "learning_rate": 6.476579794560356e-05,
+ "loss": 0.7846,
+ "step": 1842
+ },
+ {
+ "epoch": 1.843518489575193,
+ "grad_norm": 0.5363168120384216,
+ "learning_rate": 6.46677150053203e-05,
+ "loss": 0.9519,
+ "step": 1843
+ },
+ {
+ "epoch": 1.8445187709043167,
+ "grad_norm": 0.39670878648757935,
+ "learning_rate": 6.45696708888284e-05,
+ "loss": 0.8245,
+ "step": 1844
+ },
+ {
+ "epoch": 1.8455190522334406,
+ "grad_norm": 0.5151652693748474,
+ "learning_rate": 6.447166570386063e-05,
+ "loss": 0.9517,
+ "step": 1845
+ },
+ {
+ "epoch": 1.8465193335625645,
+ "grad_norm": 0.47063514590263367,
+ "learning_rate": 6.437369955810699e-05,
+ "loss": 0.8235,
+ "step": 1846
+ },
+ {
+ "epoch": 1.8475196148916884,
+ "grad_norm": 0.5120642185211182,
+ "learning_rate": 6.42757725592145e-05,
+ "loss": 1.1862,
+ "step": 1847
+ },
+ {
+ "epoch": 1.8485198962208123,
+ "grad_norm": 0.491312175989151,
+ "learning_rate": 6.417788481478728e-05,
+ "loss": 0.8375,
+ "step": 1848
+ },
+ {
+ "epoch": 1.849520177549936,
+ "grad_norm": 0.5049518346786499,
+ "learning_rate": 6.40800364323862e-05,
+ "loss": 0.8419,
+ "step": 1849
+ },
+ {
+ "epoch": 1.8505204588790596,
+ "grad_norm": 0.4442373514175415,
+ "learning_rate": 6.398222751952899e-05,
+ "loss": 0.8519,
+ "step": 1850
+ },
+ {
+ "epoch": 1.8515207402081835,
+ "grad_norm": 0.5219951868057251,
+ "learning_rate": 6.388445818368991e-05,
+ "loss": 0.8146,
+ "step": 1851
+ },
+ {
+ "epoch": 1.8525210215373074,
+ "grad_norm": 0.5035893321037292,
+ "learning_rate": 6.378672853229981e-05,
+ "loss": 0.908,
+ "step": 1852
+ },
+ {
+ "epoch": 1.8535213028664312,
+ "grad_norm": 0.4742502272129059,
+ "learning_rate": 6.368903867274585e-05,
+ "loss": 0.9503,
+ "step": 1853
+ },
+ {
+ "epoch": 1.8545215841955551,
+ "grad_norm": 0.504763126373291,
+ "learning_rate": 6.35913887123716e-05,
+ "loss": 0.8487,
+ "step": 1854
+ },
+ {
+ "epoch": 1.8555218655246788,
+ "grad_norm": 0.5125763416290283,
+ "learning_rate": 6.34937787584767e-05,
+ "loss": 0.9596,
+ "step": 1855
+ },
+ {
+ "epoch": 1.8565221468538027,
+ "grad_norm": 1.090164065361023,
+ "learning_rate": 6.339620891831678e-05,
+ "loss": 0.8088,
+ "step": 1856
+ },
+ {
+ "epoch": 1.8575224281829263,
+ "grad_norm": 0.4670305550098419,
+ "learning_rate": 6.329867929910347e-05,
+ "loss": 0.9748,
+ "step": 1857
+ },
+ {
+ "epoch": 1.8585227095120502,
+ "grad_norm": 0.49796226620674133,
+ "learning_rate": 6.32011900080042e-05,
+ "loss": 0.7566,
+ "step": 1858
+ },
+ {
+ "epoch": 1.8595229908411741,
+ "grad_norm": 0.5040385723114014,
+ "learning_rate": 6.310374115214204e-05,
+ "loss": 0.8959,
+ "step": 1859
+ },
+ {
+ "epoch": 1.860523272170298,
+ "grad_norm": 0.5290741920471191,
+ "learning_rate": 6.30063328385957e-05,
+ "loss": 0.9035,
+ "step": 1860
+ },
+ {
+ "epoch": 1.8615235534994217,
+ "grad_norm": 0.5860772728919983,
+ "learning_rate": 6.290896517439925e-05,
+ "loss": 1.0858,
+ "step": 1861
+ },
+ {
+ "epoch": 1.8625238348285456,
+ "grad_norm": 0.4714392423629761,
+ "learning_rate": 6.281163826654218e-05,
+ "loss": 0.9652,
+ "step": 1862
+ },
+ {
+ "epoch": 1.8635241161576692,
+ "grad_norm": 0.4995323717594147,
+ "learning_rate": 6.271435222196916e-05,
+ "loss": 0.9808,
+ "step": 1863
+ },
+ {
+ "epoch": 1.864524397486793,
+ "grad_norm": 0.5379069447517395,
+ "learning_rate": 6.261710714757994e-05,
+ "loss": 0.9893,
+ "step": 1864
+ },
+ {
+ "epoch": 1.865524678815917,
+ "grad_norm": 0.5350576043128967,
+ "learning_rate": 6.251990315022927e-05,
+ "loss": 1.1355,
+ "step": 1865
+ },
+ {
+ "epoch": 1.8665249601450409,
+ "grad_norm": 0.5047613382339478,
+ "learning_rate": 6.24227403367268e-05,
+ "loss": 1.0258,
+ "step": 1866
+ },
+ {
+ "epoch": 1.8675252414741648,
+ "grad_norm": 0.5068250894546509,
+ "learning_rate": 6.232561881383687e-05,
+ "loss": 1.0832,
+ "step": 1867
+ },
+ {
+ "epoch": 1.8685255228032884,
+ "grad_norm": 0.5315554738044739,
+ "learning_rate": 6.222853868827839e-05,
+ "loss": 0.8883,
+ "step": 1868
+ },
+ {
+ "epoch": 1.869525804132412,
+ "grad_norm": 0.47088900208473206,
+ "learning_rate": 6.213150006672499e-05,
+ "loss": 1.0515,
+ "step": 1869
+ },
+ {
+ "epoch": 1.870526085461536,
+ "grad_norm": 0.450911283493042,
+ "learning_rate": 6.20345030558045e-05,
+ "loss": 0.8933,
+ "step": 1870
+ },
+ {
+ "epoch": 1.8715263667906599,
+ "grad_norm": 0.5612505674362183,
+ "learning_rate": 6.193754776209911e-05,
+ "loss": 0.8822,
+ "step": 1871
+ },
+ {
+ "epoch": 1.8725266481197838,
+ "grad_norm": 0.6027489900588989,
+ "learning_rate": 6.184063429214515e-05,
+ "loss": 0.849,
+ "step": 1872
+ },
+ {
+ "epoch": 1.8735269294489076,
+ "grad_norm": 0.5527409315109253,
+ "learning_rate": 6.174376275243299e-05,
+ "loss": 0.9841,
+ "step": 1873
+ },
+ {
+ "epoch": 1.8745272107780313,
+ "grad_norm": 0.4340353310108185,
+ "learning_rate": 6.164693324940694e-05,
+ "loss": 0.7171,
+ "step": 1874
+ },
+ {
+ "epoch": 1.8755274921071552,
+ "grad_norm": 0.46394628286361694,
+ "learning_rate": 6.15501458894651e-05,
+ "loss": 0.8439,
+ "step": 1875
+ },
+ {
+ "epoch": 1.8765277734362789,
+ "grad_norm": 0.46649280190467834,
+ "learning_rate": 6.145340077895929e-05,
+ "loss": 0.9665,
+ "step": 1876
+ },
+ {
+ "epoch": 1.8775280547654027,
+ "grad_norm": 0.45841577649116516,
+ "learning_rate": 6.135669802419488e-05,
+ "loss": 0.8537,
+ "step": 1877
+ },
+ {
+ "epoch": 1.8785283360945266,
+ "grad_norm": 0.46763482689857483,
+ "learning_rate": 6.126003773143072e-05,
+ "loss": 0.8314,
+ "step": 1878
+ },
+ {
+ "epoch": 1.8795286174236505,
+ "grad_norm": 0.47747811675071716,
+ "learning_rate": 6.116342000687896e-05,
+ "loss": 0.9612,
+ "step": 1879
+ },
+ {
+ "epoch": 1.8805288987527742,
+ "grad_norm": 0.5145304203033447,
+ "learning_rate": 6.106684495670506e-05,
+ "loss": 0.9628,
+ "step": 1880
+ },
+ {
+ "epoch": 1.881529180081898,
+ "grad_norm": 0.4443700313568115,
+ "learning_rate": 6.097031268702746e-05,
+ "loss": 0.7482,
+ "step": 1881
+ },
+ {
+ "epoch": 1.8825294614110217,
+ "grad_norm": 0.4143758714199066,
+ "learning_rate": 6.087382330391774e-05,
+ "loss": 0.6993,
+ "step": 1882
+ },
+ {
+ "epoch": 1.8835297427401456,
+ "grad_norm": 0.5006669759750366,
+ "learning_rate": 6.077737691340023e-05,
+ "loss": 0.8514,
+ "step": 1883
+ },
+ {
+ "epoch": 1.8845300240692695,
+ "grad_norm": 0.48067471385002136,
+ "learning_rate": 6.0680973621452105e-05,
+ "loss": 0.8319,
+ "step": 1884
+ },
+ {
+ "epoch": 1.8855303053983934,
+ "grad_norm": 0.47147560119628906,
+ "learning_rate": 6.0584613534003144e-05,
+ "loss": 0.9822,
+ "step": 1885
+ },
+ {
+ "epoch": 1.8865305867275173,
+ "grad_norm": 0.48229023814201355,
+ "learning_rate": 6.0488296756935636e-05,
+ "loss": 0.8972,
+ "step": 1886
+ },
+ {
+ "epoch": 1.887530868056641,
+ "grad_norm": 0.487932026386261,
+ "learning_rate": 6.039202339608432e-05,
+ "loss": 0.8976,
+ "step": 1887
+ },
+ {
+ "epoch": 1.8885311493857646,
+ "grad_norm": 0.49332642555236816,
+ "learning_rate": 6.0295793557236203e-05,
+ "loss": 0.8734,
+ "step": 1888
+ },
+ {
+ "epoch": 1.8895314307148885,
+ "grad_norm": 0.4834604263305664,
+ "learning_rate": 6.019960734613047e-05,
+ "loss": 0.8414,
+ "step": 1889
+ },
+ {
+ "epoch": 1.8905317120440124,
+ "grad_norm": 0.46540340781211853,
+ "learning_rate": 6.010346486845837e-05,
+ "loss": 0.848,
+ "step": 1890
+ },
+ {
+ "epoch": 1.8915319933731363,
+ "grad_norm": 0.4350258409976959,
+ "learning_rate": 6.0007366229863117e-05,
+ "loss": 0.8143,
+ "step": 1891
+ },
+ {
+ "epoch": 1.8925322747022602,
+ "grad_norm": 0.4675842821598053,
+ "learning_rate": 5.991131153593971e-05,
+ "loss": 0.8136,
+ "step": 1892
+ },
+ {
+ "epoch": 1.8935325560313838,
+ "grad_norm": 0.560526430606842,
+ "learning_rate": 5.981530089223489e-05,
+ "loss": 1.097,
+ "step": 1893
+ },
+ {
+ "epoch": 1.8945328373605075,
+ "grad_norm": 0.48588961362838745,
+ "learning_rate": 5.971933440424703e-05,
+ "loss": 0.8046,
+ "step": 1894
+ },
+ {
+ "epoch": 1.8955331186896314,
+ "grad_norm": 0.47677376866340637,
+ "learning_rate": 5.9623412177425886e-05,
+ "loss": 0.8202,
+ "step": 1895
+ },
+ {
+ "epoch": 1.8965334000187553,
+ "grad_norm": 0.49400967359542847,
+ "learning_rate": 5.952753431717268e-05,
+ "loss": 0.8114,
+ "step": 1896
+ },
+ {
+ "epoch": 1.8975336813478791,
+ "grad_norm": 0.4729720652103424,
+ "learning_rate": 5.9431700928839805e-05,
+ "loss": 0.7848,
+ "step": 1897
+ },
+ {
+ "epoch": 1.898533962677003,
+ "grad_norm": 0.4910169541835785,
+ "learning_rate": 5.933591211773082e-05,
+ "loss": 0.8424,
+ "step": 1898
+ },
+ {
+ "epoch": 1.8995342440061267,
+ "grad_norm": 0.4618901014328003,
+ "learning_rate": 5.924016798910037e-05,
+ "loss": 0.8423,
+ "step": 1899
+ },
+ {
+ "epoch": 1.9005345253352506,
+ "grad_norm": 0.562754213809967,
+ "learning_rate": 5.914446864815388e-05,
+ "loss": 0.8016,
+ "step": 1900
+ },
+ {
+ "epoch": 1.9015348066643742,
+ "grad_norm": 0.479568749666214,
+ "learning_rate": 5.9048814200047675e-05,
+ "loss": 0.9471,
+ "step": 1901
+ },
+ {
+ "epoch": 1.9025350879934981,
+ "grad_norm": 0.5435795187950134,
+ "learning_rate": 5.895320474988864e-05,
+ "loss": 0.94,
+ "step": 1902
+ },
+ {
+ "epoch": 1.903535369322622,
+ "grad_norm": 0.711804211139679,
+ "learning_rate": 5.885764040273426e-05,
+ "loss": 0.9192,
+ "step": 1903
+ },
+ {
+ "epoch": 1.904535650651746,
+ "grad_norm": 0.49941113591194153,
+ "learning_rate": 5.876212126359251e-05,
+ "loss": 0.8541,
+ "step": 1904
+ },
+ {
+ "epoch": 1.9055359319808696,
+ "grad_norm": 0.4437618553638458,
+ "learning_rate": 5.866664743742162e-05,
+ "loss": 0.935,
+ "step": 1905
+ },
+ {
+ "epoch": 1.9065362133099935,
+ "grad_norm": 0.4949079751968384,
+ "learning_rate": 5.857121902913008e-05,
+ "loss": 0.965,
+ "step": 1906
+ },
+ {
+ "epoch": 1.9075364946391171,
+ "grad_norm": 0.5047593712806702,
+ "learning_rate": 5.8475836143576433e-05,
+ "loss": 0.9078,
+ "step": 1907
+ },
+ {
+ "epoch": 1.908536775968241,
+ "grad_norm": 0.4645143449306488,
+ "learning_rate": 5.838049888556925e-05,
+ "loss": 0.7848,
+ "step": 1908
+ },
+ {
+ "epoch": 1.909537057297365,
+ "grad_norm": 0.45980706810951233,
+ "learning_rate": 5.8285207359866936e-05,
+ "loss": 0.8297,
+ "step": 1909
+ },
+ {
+ "epoch": 1.9105373386264888,
+ "grad_norm": 0.566573441028595,
+ "learning_rate": 5.8189961671177574e-05,
+ "loss": 1.0823,
+ "step": 1910
+ },
+ {
+ "epoch": 1.9115376199556127,
+ "grad_norm": 0.46022629737854004,
+ "learning_rate": 5.809476192415905e-05,
+ "loss": 0.8634,
+ "step": 1911
+ },
+ {
+ "epoch": 1.9125379012847363,
+ "grad_norm": 0.493632048368454,
+ "learning_rate": 5.7999608223418534e-05,
+ "loss": 0.7264,
+ "step": 1912
+ },
+ {
+ "epoch": 1.91353818261386,
+ "grad_norm": 0.4561927616596222,
+ "learning_rate": 5.790450067351291e-05,
+ "loss": 0.8736,
+ "step": 1913
+ },
+ {
+ "epoch": 1.9145384639429839,
+ "grad_norm": 0.5217312574386597,
+ "learning_rate": 5.780943937894805e-05,
+ "loss": 0.9918,
+ "step": 1914
+ },
+ {
+ "epoch": 1.9155387452721078,
+ "grad_norm": 0.500164806842804,
+ "learning_rate": 5.771442444417918e-05,
+ "loss": 0.784,
+ "step": 1915
+ },
+ {
+ "epoch": 1.9165390266012317,
+ "grad_norm": 0.4723392724990845,
+ "learning_rate": 5.761945597361054e-05,
+ "loss": 0.9225,
+ "step": 1916
+ },
+ {
+ "epoch": 1.9175393079303555,
+ "grad_norm": 0.4722166359424591,
+ "learning_rate": 5.752453407159522e-05,
+ "loss": 0.8516,
+ "step": 1917
+ },
+ {
+ "epoch": 1.9185395892594792,
+ "grad_norm": 0.4163341522216797,
+ "learning_rate": 5.742965884243532e-05,
+ "loss": 0.7709,
+ "step": 1918
+ },
+ {
+ "epoch": 1.919539870588603,
+ "grad_norm": 0.5236088037490845,
+ "learning_rate": 5.733483039038149e-05,
+ "loss": 0.9662,
+ "step": 1919
+ },
+ {
+ "epoch": 1.9205401519177268,
+ "grad_norm": 0.5264710783958435,
+ "learning_rate": 5.724004881963311e-05,
+ "loss": 0.8918,
+ "step": 1920
+ },
+ {
+ "epoch": 1.9215404332468506,
+ "grad_norm": 0.43993479013442993,
+ "learning_rate": 5.714531423433791e-05,
+ "loss": 0.9233,
+ "step": 1921
+ },
+ {
+ "epoch": 1.9225407145759745,
+ "grad_norm": 0.4552697241306305,
+ "learning_rate": 5.705062673859216e-05,
+ "loss": 0.8593,
+ "step": 1922
+ },
+ {
+ "epoch": 1.9235409959050984,
+ "grad_norm": 0.5186688899993896,
+ "learning_rate": 5.69559864364402e-05,
+ "loss": 0.906,
+ "step": 1923
+ },
+ {
+ "epoch": 1.924541277234222,
+ "grad_norm": 0.5140933990478516,
+ "learning_rate": 5.6861393431874675e-05,
+ "loss": 1.0488,
+ "step": 1924
+ },
+ {
+ "epoch": 1.925541558563346,
+ "grad_norm": 0.4874193072319031,
+ "learning_rate": 5.676684782883615e-05,
+ "loss": 0.8875,
+ "step": 1925
+ },
+ {
+ "epoch": 1.9265418398924696,
+ "grad_norm": 0.5220529437065125,
+ "learning_rate": 5.667234973121317e-05,
+ "loss": 0.8561,
+ "step": 1926
+ },
+ {
+ "epoch": 1.9275421212215935,
+ "grad_norm": 0.43269822001457214,
+ "learning_rate": 5.6577899242842025e-05,
+ "loss": 0.9039,
+ "step": 1927
+ },
+ {
+ "epoch": 1.9285424025507174,
+ "grad_norm": 0.5126697421073914,
+ "learning_rate": 5.648349646750673e-05,
+ "loss": 0.941,
+ "step": 1928
+ },
+ {
+ "epoch": 1.9295426838798413,
+ "grad_norm": 0.5042800307273865,
+ "learning_rate": 5.6389141508938903e-05,
+ "loss": 0.9901,
+ "step": 1929
+ },
+ {
+ "epoch": 1.9305429652089652,
+ "grad_norm": 0.5381462574005127,
+ "learning_rate": 5.629483447081751e-05,
+ "loss": 0.9661,
+ "step": 1930
+ },
+ {
+ "epoch": 1.9315432465380888,
+ "grad_norm": 0.5455595254898071,
+ "learning_rate": 5.620057545676901e-05,
+ "loss": 0.8618,
+ "step": 1931
+ },
+ {
+ "epoch": 1.9325435278672125,
+ "grad_norm": 0.45534226298332214,
+ "learning_rate": 5.610636457036693e-05,
+ "loss": 0.827,
+ "step": 1932
+ },
+ {
+ "epoch": 1.9335438091963364,
+ "grad_norm": 0.4841485917568207,
+ "learning_rate": 5.601220191513208e-05,
+ "loss": 0.8979,
+ "step": 1933
+ },
+ {
+ "epoch": 1.9345440905254603,
+ "grad_norm": 0.4711385667324066,
+ "learning_rate": 5.591808759453214e-05,
+ "loss": 0.9841,
+ "step": 1934
+ },
+ {
+ "epoch": 1.9355443718545842,
+ "grad_norm": 0.44583311676979065,
+ "learning_rate": 5.5824021711981686e-05,
+ "loss": 0.7455,
+ "step": 1935
+ },
+ {
+ "epoch": 1.936544653183708,
+ "grad_norm": 0.5186678171157837,
+ "learning_rate": 5.573000437084221e-05,
+ "loss": 0.8881,
+ "step": 1936
+ },
+ {
+ "epoch": 1.9375449345128317,
+ "grad_norm": 0.5111430287361145,
+ "learning_rate": 5.563603567442168e-05,
+ "loss": 0.97,
+ "step": 1937
+ },
+ {
+ "epoch": 1.9385452158419556,
+ "grad_norm": 0.5185840129852295,
+ "learning_rate": 5.554211572597477e-05,
+ "loss": 0.9864,
+ "step": 1938
+ },
+ {
+ "epoch": 1.9395454971710793,
+ "grad_norm": 0.590919554233551,
+ "learning_rate": 5.544824462870244e-05,
+ "loss": 0.9917,
+ "step": 1939
+ },
+ {
+ "epoch": 1.9405457785002032,
+ "grad_norm": 0.5174764394760132,
+ "learning_rate": 5.5354422485752125e-05,
+ "loss": 0.91,
+ "step": 1940
+ },
+ {
+ "epoch": 1.941546059829327,
+ "grad_norm": 0.4679591953754425,
+ "learning_rate": 5.5260649400217326e-05,
+ "loss": 0.8743,
+ "step": 1941
+ },
+ {
+ "epoch": 1.942546341158451,
+ "grad_norm": 0.5026495456695557,
+ "learning_rate": 5.5166925475137735e-05,
+ "loss": 0.9652,
+ "step": 1942
+ },
+ {
+ "epoch": 1.9435466224875746,
+ "grad_norm": 0.48180490732192993,
+ "learning_rate": 5.507325081349903e-05,
+ "loss": 0.9213,
+ "step": 1943
+ },
+ {
+ "epoch": 1.9445469038166985,
+ "grad_norm": 0.467143714427948,
+ "learning_rate": 5.497962551823266e-05,
+ "loss": 0.901,
+ "step": 1944
+ },
+ {
+ "epoch": 1.9455471851458221,
+ "grad_norm": 0.4535980820655823,
+ "learning_rate": 5.488604969221597e-05,
+ "loss": 0.8284,
+ "step": 1945
+ },
+ {
+ "epoch": 1.946547466474946,
+ "grad_norm": 0.5203812122344971,
+ "learning_rate": 5.479252343827178e-05,
+ "loss": 0.8001,
+ "step": 1946
+ },
+ {
+ "epoch": 1.94754774780407,
+ "grad_norm": 0.4892285168170929,
+ "learning_rate": 5.469904685916861e-05,
+ "loss": 0.7415,
+ "step": 1947
+ },
+ {
+ "epoch": 1.9485480291331938,
+ "grad_norm": 0.5130967497825623,
+ "learning_rate": 5.460562005762024e-05,
+ "loss": 0.9661,
+ "step": 1948
+ },
+ {
+ "epoch": 1.9495483104623177,
+ "grad_norm": 0.47101548314094543,
+ "learning_rate": 5.4512243136285915e-05,
+ "loss": 0.85,
+ "step": 1949
+ },
+ {
+ "epoch": 1.9505485917914414,
+ "grad_norm": 0.4335457384586334,
+ "learning_rate": 5.441891619776987e-05,
+ "loss": 0.8406,
+ "step": 1950
+ },
+ {
+ "epoch": 1.951548873120565,
+ "grad_norm": 0.45771148800849915,
+ "learning_rate": 5.432563934462166e-05,
+ "loss": 0.9252,
+ "step": 1951
+ },
+ {
+ "epoch": 1.952549154449689,
+ "grad_norm": 0.5619480013847351,
+ "learning_rate": 5.423241267933557e-05,
+ "loss": 0.844,
+ "step": 1952
+ },
+ {
+ "epoch": 1.9535494357788128,
+ "grad_norm": 0.4204142391681671,
+ "learning_rate": 5.4139236304350935e-05,
+ "loss": 0.8829,
+ "step": 1953
+ },
+ {
+ "epoch": 1.9545497171079367,
+ "grad_norm": 0.4862264394760132,
+ "learning_rate": 5.404611032205169e-05,
+ "loss": 0.9882,
+ "step": 1954
+ },
+ {
+ "epoch": 1.9555499984370606,
+ "grad_norm": 0.46490079164505005,
+ "learning_rate": 5.3953034834766416e-05,
+ "loss": 0.978,
+ "step": 1955
+ },
+ {
+ "epoch": 1.9565502797661842,
+ "grad_norm": 0.5944529175758362,
+ "learning_rate": 5.386000994476832e-05,
+ "loss": 0.8706,
+ "step": 1956
+ },
+ {
+ "epoch": 1.957550561095308,
+ "grad_norm": 0.5310636162757874,
+ "learning_rate": 5.376703575427481e-05,
+ "loss": 0.9472,
+ "step": 1957
+ },
+ {
+ "epoch": 1.9585508424244318,
+ "grad_norm": 0.49689510464668274,
+ "learning_rate": 5.367411236544786e-05,
+ "loss": 1.0081,
+ "step": 1958
+ },
+ {
+ "epoch": 1.9595511237535557,
+ "grad_norm": 0.5658974647521973,
+ "learning_rate": 5.3581239880393375e-05,
+ "loss": 1.0419,
+ "step": 1959
+ },
+ {
+ "epoch": 1.9605514050826796,
+ "grad_norm": 0.4068913757801056,
+ "learning_rate": 5.3488418401161475e-05,
+ "loss": 0.7635,
+ "step": 1960
+ },
+ {
+ "epoch": 1.9615516864118034,
+ "grad_norm": 0.6318438053131104,
+ "learning_rate": 5.339564802974615e-05,
+ "loss": 0.8508,
+ "step": 1961
+ },
+ {
+ "epoch": 1.962551967740927,
+ "grad_norm": 0.5346115827560425,
+ "learning_rate": 5.33029288680852e-05,
+ "loss": 0.8885,
+ "step": 1962
+ },
+ {
+ "epoch": 1.963552249070051,
+ "grad_norm": 0.4659571051597595,
+ "learning_rate": 5.321026101806032e-05,
+ "loss": 0.8957,
+ "step": 1963
+ },
+ {
+ "epoch": 1.9645525303991747,
+ "grad_norm": 0.502803385257721,
+ "learning_rate": 5.311764458149664e-05,
+ "loss": 0.8807,
+ "step": 1964
+ },
+ {
+ "epoch": 1.9655528117282985,
+ "grad_norm": 0.482771098613739,
+ "learning_rate": 5.302507966016295e-05,
+ "loss": 0.9404,
+ "step": 1965
+ },
+ {
+ "epoch": 1.9665530930574224,
+ "grad_norm": 0.48509371280670166,
+ "learning_rate": 5.293256635577126e-05,
+ "loss": 0.8903,
+ "step": 1966
+ },
+ {
+ "epoch": 1.9675533743865463,
+ "grad_norm": 0.5044885873794556,
+ "learning_rate": 5.284010476997705e-05,
+ "loss": 0.8193,
+ "step": 1967
+ },
+ {
+ "epoch": 1.96855365571567,
+ "grad_norm": 0.5143056511878967,
+ "learning_rate": 5.274769500437882e-05,
+ "loss": 0.9903,
+ "step": 1968
+ },
+ {
+ "epoch": 1.9695539370447939,
+ "grad_norm": 0.4803191125392914,
+ "learning_rate": 5.265533716051825e-05,
+ "loss": 0.815,
+ "step": 1969
+ },
+ {
+ "epoch": 1.9705542183739175,
+ "grad_norm": 0.4977998435497284,
+ "learning_rate": 5.256303133987982e-05,
+ "loss": 0.8749,
+ "step": 1970
+ },
+ {
+ "epoch": 1.9715544997030414,
+ "grad_norm": 0.4953812062740326,
+ "learning_rate": 5.247077764389099e-05,
+ "loss": 0.8228,
+ "step": 1971
+ },
+ {
+ "epoch": 1.9725547810321653,
+ "grad_norm": 0.4795776903629303,
+ "learning_rate": 5.2378576173921934e-05,
+ "loss": 0.8692,
+ "step": 1972
+ },
+ {
+ "epoch": 1.9735550623612892,
+ "grad_norm": 0.6318855881690979,
+ "learning_rate": 5.22864270312853e-05,
+ "loss": 1.0387,
+ "step": 1973
+ },
+ {
+ "epoch": 1.974555343690413,
+ "grad_norm": 0.4658355116844177,
+ "learning_rate": 5.219433031723641e-05,
+ "loss": 0.7585,
+ "step": 1974
+ },
+ {
+ "epoch": 1.9755556250195367,
+ "grad_norm": 0.41826239228248596,
+ "learning_rate": 5.210228613297281e-05,
+ "loss": 0.7991,
+ "step": 1975
+ },
+ {
+ "epoch": 1.9765559063486604,
+ "grad_norm": 0.4662337005138397,
+ "learning_rate": 5.201029457963451e-05,
+ "loss": 0.9127,
+ "step": 1976
+ },
+ {
+ "epoch": 1.9775561876777843,
+ "grad_norm": 0.4976811408996582,
+ "learning_rate": 5.191835575830352e-05,
+ "loss": 1.104,
+ "step": 1977
+ },
+ {
+ "epoch": 1.9785564690069082,
+ "grad_norm": 0.5814425945281982,
+ "learning_rate": 5.1826469770004026e-05,
+ "loss": 0.9479,
+ "step": 1978
+ },
+ {
+ "epoch": 1.979556750336032,
+ "grad_norm": 0.5100698471069336,
+ "learning_rate": 5.1734636715702043e-05,
+ "loss": 0.8778,
+ "step": 1979
+ },
+ {
+ "epoch": 1.980557031665156,
+ "grad_norm": 0.5200473070144653,
+ "learning_rate": 5.1642856696305575e-05,
+ "loss": 0.9684,
+ "step": 1980
+ },
+ {
+ "epoch": 1.9815573129942796,
+ "grad_norm": 0.728875458240509,
+ "learning_rate": 5.155112981266422e-05,
+ "loss": 1.1052,
+ "step": 1981
+ },
+ {
+ "epoch": 1.9825575943234035,
+ "grad_norm": 0.504478931427002,
+ "learning_rate": 5.145945616556921e-05,
+ "loss": 0.9089,
+ "step": 1982
+ },
+ {
+ "epoch": 1.9835578756525272,
+ "grad_norm": 0.48226889967918396,
+ "learning_rate": 5.136783585575336e-05,
+ "loss": 0.8765,
+ "step": 1983
+ },
+ {
+ "epoch": 1.984558156981651,
+ "grad_norm": 0.47124215960502625,
+ "learning_rate": 5.127626898389075e-05,
+ "loss": 0.8909,
+ "step": 1984
+ },
+ {
+ "epoch": 1.985558438310775,
+ "grad_norm": 0.4892251491546631,
+ "learning_rate": 5.118475565059691e-05,
+ "loss": 0.7596,
+ "step": 1985
+ },
+ {
+ "epoch": 1.9865587196398988,
+ "grad_norm": 0.550346851348877,
+ "learning_rate": 5.109329595642829e-05,
+ "loss": 1.0297,
+ "step": 1986
+ },
+ {
+ "epoch": 1.9875590009690225,
+ "grad_norm": 0.5535497069358826,
+ "learning_rate": 5.1001890001882734e-05,
+ "loss": 0.8995,
+ "step": 1987
+ },
+ {
+ "epoch": 1.9885592822981464,
+ "grad_norm": 0.4945215582847595,
+ "learning_rate": 5.091053788739878e-05,
+ "loss": 0.8223,
+ "step": 1988
+ },
+ {
+ "epoch": 1.98955956362727,
+ "grad_norm": 0.46689140796661377,
+ "learning_rate": 5.081923971335582e-05,
+ "loss": 0.7746,
+ "step": 1989
+ },
+ {
+ "epoch": 1.990559844956394,
+ "grad_norm": 0.46213075518608093,
+ "learning_rate": 5.072799558007415e-05,
+ "loss": 0.9981,
+ "step": 1990
+ },
+ {
+ "epoch": 1.9915601262855178,
+ "grad_norm": 0.4265044629573822,
+ "learning_rate": 5.063680558781445e-05,
+ "loss": 0.7414,
+ "step": 1991
+ },
+ {
+ "epoch": 1.9925604076146417,
+ "grad_norm": 0.47252804040908813,
+ "learning_rate": 5.0545669836778144e-05,
+ "loss": 0.9779,
+ "step": 1992
+ },
+ {
+ "epoch": 1.9935606889437656,
+ "grad_norm": 0.49390360713005066,
+ "learning_rate": 5.045458842710684e-05,
+ "loss": 1.047,
+ "step": 1993
+ },
+ {
+ "epoch": 1.9945609702728893,
+ "grad_norm": 0.48533156514167786,
+ "learning_rate": 5.036356145888263e-05,
+ "loss": 0.784,
+ "step": 1994
+ },
+ {
+ "epoch": 1.995561251602013,
+ "grad_norm": 0.4855436086654663,
+ "learning_rate": 5.0272589032127594e-05,
+ "loss": 1.0186,
+ "step": 1995
+ },
+ {
+ "epoch": 1.9965615329311368,
+ "grad_norm": 0.48796966671943665,
+ "learning_rate": 5.0181671246804064e-05,
+ "loss": 0.931,
+ "step": 1996
+ },
+ {
+ "epoch": 1.9975618142602607,
+ "grad_norm": 0.476491242647171,
+ "learning_rate": 5.009080820281415e-05,
+ "loss": 0.7653,
+ "step": 1997
+ },
+ {
+ "epoch": 1.9985620955893846,
+ "grad_norm": 0.48085761070251465,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 0.7846,
+ "step": 1998
+ },
+ {
+ "epoch": 1.9995623769185085,
+ "grad_norm": 0.454314261674881,
+ "learning_rate": 4.990924673814336e-05,
+ "loss": 0.8582,
+ "step": 1999
+ },
+ {
+ "epoch": 2.0005626582476324,
+ "grad_norm": 0.3911774158477783,
+ "learning_rate": 4.981854851696568e-05,
+ "loss": 0.5936,
+ "step": 2000
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 2997,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.2941133201683251e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-2000/training_args.bin b/checkpoint-2000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f5db54c03213459099040d14f85829b6aeb0666
--- /dev/null
+++ b/checkpoint-2000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb43d78443117126e44061cb7a0c1f9a5c40f27f7bf1d5cd0232587a4334407
+size 5304
diff --git a/checkpoint-2500/config.json b/checkpoint-2500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..06ec1df58f28234ccce22a5325e108ece94f0078
--- /dev/null
+++ b/checkpoint-2500/config.json
@@ -0,0 +1,34 @@
+{
+ "_name_or_path": "facebook/nllb-200-3.3B",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "M2M100ForConditionalGeneration"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 0,
+ "d_model": 2048,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 8192,
+ "decoder_layerdrop": 0,
+ "decoder_layers": 24,
+ "decoder_start_token_id": 2,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 8192,
+ "encoder_layerdrop": 0,
+ "encoder_layers": 24,
+ "eos_token_id": 2,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 200,
+ "max_position_embeddings": 1024,
+ "model_type": "m2m_100",
+ "num_hidden_layers": 24,
+ "pad_token_id": 1,
+ "scale_embedding": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.43.2",
+ "use_cache": true,
+ "vocab_size": 256206
+}
diff --git a/checkpoint-2500/generation_config.json b/checkpoint-2500/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..402a1a43d1af8c080466b8139184b4e5b7f3f47c
--- /dev/null
+++ b/checkpoint-2500/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "decoder_start_token_id": 2,
+ "eos_token_id": 2,
+ "max_length": 200,
+ "pad_token_id": 1,
+ "transformers_version": "4.43.2"
+}
diff --git a/checkpoint-2500/model-00001-of-00003.safetensors b/checkpoint-2500/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b1dde8e6a6c3814784573e888aa5a2770e5caa25
--- /dev/null
+++ b/checkpoint-2500/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c604a38042e22292fb9bfbf587f1ed9e8b00ce9ffb668a2069dc9550f45cdd2
+size 4986088344
diff --git a/checkpoint-2500/model-00002-of-00003.safetensors b/checkpoint-2500/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..af6fd597d39b28b0d157dd7216a82fb55f008633
--- /dev/null
+++ b/checkpoint-2500/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b2fb54898460088053df672aa349694182281b60c60c654f2bc0a572a87f864
+size 4985688360
diff --git a/checkpoint-2500/model-00003-of-00003.safetensors b/checkpoint-2500/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7f52b916fdbc2a2cc61948ad64be542ba47d9a31
--- /dev/null
+++ b/checkpoint-2500/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be1d528517bcaee1178e60c26105642e5e29029a3201355894fad7b2aaf57959
+size 3407796744
diff --git a/checkpoint-2500/model.safetensors.index.json b/checkpoint-2500/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..66f5db79b23230955de24502c00adc6525edbdfc
--- /dev/null
+++ b/checkpoint-2500/model.safetensors.index.json
@@ -0,0 +1,1020 @@
+{
+ "metadata": {
+ "total_size": 13379452928
+ },
+ "weight_map": {
+ "model.decoder.layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.shared.weight": "model-00001-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-2500/optimizer.pt b/checkpoint-2500/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..410562ceae1c1b9370b890333c4cc6f0535d3047
--- /dev/null
+++ b/checkpoint-2500/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3b90030d40004b67548c3e9f2d74560d5fdc38e42574eead175e101ee1b9613
+size 16695613
diff --git a/checkpoint-2500/rng_state.pth b/checkpoint-2500/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ae1e67e4161cf916ce4e402a1a8842ae515f15aa
--- /dev/null
+++ b/checkpoint-2500/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a050cacc41c9e392abf1c66fff28f5182eb15091933f76c45b40ae0c90fa7c3
+size 14244
diff --git a/checkpoint-2500/scheduler.pt b/checkpoint-2500/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dd8e27122a81b9d9d7dc83f626b747d6b7fcdfbe
--- /dev/null
+++ b/checkpoint-2500/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08e9362e46f924529c72e8a4fcf95c0e125b02b2be6e993db23bcdc91ebaad7c
+size 1064
diff --git a/checkpoint-2500/sentencepiece.bpe.model b/checkpoint-2500/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..dc2262d3e1d375b235eb71c24119c8e73f85d4ad
--- /dev/null
+++ b/checkpoint-2500/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
+size 4852054
diff --git a/checkpoint-2500/special_tokens_map.json b/checkpoint-2500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..770c6f4e25faf27bbc3878b806f2ecfb88c5169e
--- /dev/null
+++ b/checkpoint-2500/special_tokens_map.json
@@ -0,0 +1,255 @@
+{
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-2500/tokenizer.json b/checkpoint-2500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..98050e98b98364c06d83b3f41864076220cb8408
--- /dev/null
+++ b/checkpoint-2500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b39b25b0763a1dd69dec54081fafcf10770d9f2538a3bd975a0c4be6d60a9c2
+size 17331294
diff --git a/checkpoint-2500/tokenizer_config.json b/checkpoint-2500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f1424d3657c008568198b44be241646482e7e9f2
--- /dev/null
+++ b/checkpoint-2500/tokenizer_config.json
@@ -0,0 +1,1878 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256001": {
+ "content": "ace_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256002": {
+ "content": "ace_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256003": {
+ "content": "acm_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256004": {
+ "content": "acq_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256005": {
+ "content": "aeb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256006": {
+ "content": "afr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256007": {
+ "content": "ajp_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256008": {
+ "content": "aka_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256009": {
+ "content": "amh_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256010": {
+ "content": "apc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256011": {
+ "content": "arb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256012": {
+ "content": "ars_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256013": {
+ "content": "ary_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256014": {
+ "content": "arz_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256015": {
+ "content": "asm_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256016": {
+ "content": "ast_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256017": {
+ "content": "awa_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256018": {
+ "content": "ayr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256019": {
+ "content": "azb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256020": {
+ "content": "azj_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256021": {
+ "content": "bak_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256022": {
+ "content": "bam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256023": {
+ "content": "ban_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256024": {
+ "content": "bel_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256025": {
+ "content": "bem_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256026": {
+ "content": "ben_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256027": {
+ "content": "bho_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256028": {
+ "content": "bjn_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256029": {
+ "content": "bjn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256030": {
+ "content": "bod_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256031": {
+ "content": "bos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256032": {
+ "content": "bug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256033": {
+ "content": "bul_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256034": {
+ "content": "cat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256035": {
+ "content": "ceb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256036": {
+ "content": "ces_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256037": {
+ "content": "cjk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256038": {
+ "content": "ckb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256039": {
+ "content": "crh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256040": {
+ "content": "cym_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256041": {
+ "content": "dan_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256042": {
+ "content": "deu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256043": {
+ "content": "dik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256044": {
+ "content": "dyu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256045": {
+ "content": "dzo_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256046": {
+ "content": "ell_Grek",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256047": {
+ "content": "eng_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256048": {
+ "content": "epo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256049": {
+ "content": "est_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256050": {
+ "content": "eus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256051": {
+ "content": "ewe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256052": {
+ "content": "fao_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256053": {
+ "content": "pes_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256054": {
+ "content": "fij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256055": {
+ "content": "fin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256056": {
+ "content": "fon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256057": {
+ "content": "fra_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256058": {
+ "content": "fur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256059": {
+ "content": "fuv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256060": {
+ "content": "gla_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256061": {
+ "content": "gle_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256062": {
+ "content": "glg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256063": {
+ "content": "grn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256064": {
+ "content": "guj_Gujr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256065": {
+ "content": "hat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256066": {
+ "content": "hau_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256067": {
+ "content": "heb_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256068": {
+ "content": "hin_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256069": {
+ "content": "hne_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256070": {
+ "content": "hrv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256071": {
+ "content": "hun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256072": {
+ "content": "hye_Armn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256073": {
+ "content": "ibo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256074": {
+ "content": "ilo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256075": {
+ "content": "ind_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256076": {
+ "content": "isl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256077": {
+ "content": "ita_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256078": {
+ "content": "jav_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256079": {
+ "content": "jpn_Jpan",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256080": {
+ "content": "kab_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256081": {
+ "content": "kac_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256082": {
+ "content": "kam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256083": {
+ "content": "kan_Knda",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256084": {
+ "content": "kas_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256085": {
+ "content": "kas_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256086": {
+ "content": "kat_Geor",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256087": {
+ "content": "knc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256088": {
+ "content": "knc_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256089": {
+ "content": "kaz_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256090": {
+ "content": "kbp_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256091": {
+ "content": "kea_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256092": {
+ "content": "khm_Khmr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256093": {
+ "content": "kik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256094": {
+ "content": "kin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256095": {
+ "content": "kir_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256096": {
+ "content": "kmb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256097": {
+ "content": "kon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256098": {
+ "content": "kor_Hang",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256099": {
+ "content": "kmr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256100": {
+ "content": "lao_Laoo",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256101": {
+ "content": "lvs_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256102": {
+ "content": "lij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256103": {
+ "content": "lim_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256104": {
+ "content": "lin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256105": {
+ "content": "lit_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256106": {
+ "content": "lmo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256107": {
+ "content": "ltg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256108": {
+ "content": "ltz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256109": {
+ "content": "lua_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256110": {
+ "content": "lug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256111": {
+ "content": "luo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256112": {
+ "content": "lus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256113": {
+ "content": "mag_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256114": {
+ "content": "mai_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256115": {
+ "content": "mal_Mlym",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256116": {
+ "content": "mar_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256117": {
+ "content": "min_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256118": {
+ "content": "mkd_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256119": {
+ "content": "plt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256120": {
+ "content": "mlt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256121": {
+ "content": "mni_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256122": {
+ "content": "khk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256123": {
+ "content": "mos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256124": {
+ "content": "mri_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256125": {
+ "content": "zsm_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256126": {
+ "content": "mya_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256127": {
+ "content": "nld_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256128": {
+ "content": "nno_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256129": {
+ "content": "nob_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256130": {
+ "content": "npi_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256131": {
+ "content": "nso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256132": {
+ "content": "nus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256133": {
+ "content": "nya_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256134": {
+ "content": "oci_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256135": {
+ "content": "gaz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256136": {
+ "content": "ory_Orya",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256137": {
+ "content": "pag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256138": {
+ "content": "pan_Guru",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256139": {
+ "content": "pap_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256140": {
+ "content": "pol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256141": {
+ "content": "por_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256142": {
+ "content": "prs_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256143": {
+ "content": "pbt_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256144": {
+ "content": "quy_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256145": {
+ "content": "ron_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256146": {
+ "content": "run_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256147": {
+ "content": "rus_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256148": {
+ "content": "sag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256149": {
+ "content": "san_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256150": {
+ "content": "sat_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256151": {
+ "content": "scn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256152": {
+ "content": "shn_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256153": {
+ "content": "sin_Sinh",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256154": {
+ "content": "slk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256155": {
+ "content": "slv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256156": {
+ "content": "smo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256157": {
+ "content": "sna_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256158": {
+ "content": "snd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256159": {
+ "content": "som_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256160": {
+ "content": "sot_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256161": {
+ "content": "spa_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256162": {
+ "content": "als_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256163": {
+ "content": "srd_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256164": {
+ "content": "srp_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256165": {
+ "content": "ssw_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256166": {
+ "content": "sun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256167": {
+ "content": "swe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256168": {
+ "content": "swh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256169": {
+ "content": "szl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256170": {
+ "content": "tam_Taml",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256171": {
+ "content": "tat_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256172": {
+ "content": "tel_Telu",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256173": {
+ "content": "tgk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256174": {
+ "content": "tgl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256175": {
+ "content": "tha_Thai",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256176": {
+ "content": "tir_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256177": {
+ "content": "taq_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256178": {
+ "content": "taq_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256179": {
+ "content": "tpi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256180": {
+ "content": "tsn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256181": {
+ "content": "tso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256182": {
+ "content": "tuk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256183": {
+ "content": "tum_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256184": {
+ "content": "tur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256185": {
+ "content": "twi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256186": {
+ "content": "tzm_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256187": {
+ "content": "uig_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256188": {
+ "content": "ukr_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256189": {
+ "content": "umb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256190": {
+ "content": "urd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256191": {
+ "content": "uzn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256192": {
+ "content": "vec_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256193": {
+ "content": "vie_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256194": {
+ "content": "war_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256195": {
+ "content": "wol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256196": {
+ "content": "xho_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256197": {
+ "content": "ydd_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256198": {
+ "content": "yor_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256199": {
+ "content": "yue_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256200": {
+ "content": "zho_Hans",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256201": {
+ "content": "zho_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256202": {
+ "content": "zul_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256203": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "legacy_behaviour": false,
+ "mask_token": "",
+ "model_max_length": 1024,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "src_lang": "eng_Latn",
+ "tgt_lang": null,
+ "tokenizer_class": "NllbTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-2500/trainer_state.json b/checkpoint-2500/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..86177d650df0330f31a8a394123280ee5759ce7a
--- /dev/null
+++ b/checkpoint-2500/trainer_state.json
@@ -0,0 +1,17533 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.50070332280954,
+ "eval_steps": 500,
+ "global_step": 2500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001000281329123816,
+ "grad_norm": 5.902005195617676,
+ "learning_rate": 0.0001999999450590425,
+ "loss": 3.1875,
+ "step": 1
+ },
+ {
+ "epoch": 0.002000562658247632,
+ "grad_norm": 3.2577760219573975,
+ "learning_rate": 0.00019999978023623033,
+ "loss": 2.3666,
+ "step": 2
+ },
+ {
+ "epoch": 0.003000843987371448,
+ "grad_norm": 5.3700995445251465,
+ "learning_rate": 0.0001999995055317446,
+ "loss": 2.8282,
+ "step": 3
+ },
+ {
+ "epoch": 0.004001125316495264,
+ "grad_norm": 2.1445534229278564,
+ "learning_rate": 0.00019999912094588717,
+ "loss": 2.2322,
+ "step": 4
+ },
+ {
+ "epoch": 0.005001406645619081,
+ "grad_norm": 1.5143821239471436,
+ "learning_rate": 0.00019999862647908064,
+ "loss": 2.1709,
+ "step": 5
+ },
+ {
+ "epoch": 0.006001687974742896,
+ "grad_norm": 2.0491714477539062,
+ "learning_rate": 0.00019999802213186834,
+ "loss": 2.2863,
+ "step": 6
+ },
+ {
+ "epoch": 0.007001969303866712,
+ "grad_norm": 1.2016857862472534,
+ "learning_rate": 0.0001999973079049143,
+ "loss": 1.5595,
+ "step": 7
+ },
+ {
+ "epoch": 0.008002250632990529,
+ "grad_norm": 1.3860406875610352,
+ "learning_rate": 0.00019999648379900338,
+ "loss": 1.7264,
+ "step": 8
+ },
+ {
+ "epoch": 0.009002531962114344,
+ "grad_norm": 1.0861930847167969,
+ "learning_rate": 0.0001999955498150411,
+ "loss": 2.0533,
+ "step": 9
+ },
+ {
+ "epoch": 0.010002813291238161,
+ "grad_norm": 2.233243703842163,
+ "learning_rate": 0.00019999450595405374,
+ "loss": 1.9378,
+ "step": 10
+ },
+ {
+ "epoch": 0.011003094620361977,
+ "grad_norm": 1.302808165550232,
+ "learning_rate": 0.0001999933522171883,
+ "loss": 1.9182,
+ "step": 11
+ },
+ {
+ "epoch": 0.012003375949485792,
+ "grad_norm": 0.8285257816314697,
+ "learning_rate": 0.00019999208860571255,
+ "loss": 1.9146,
+ "step": 12
+ },
+ {
+ "epoch": 0.01300365727860961,
+ "grad_norm": 1.2248319387435913,
+ "learning_rate": 0.00019999071512101496,
+ "loss": 1.7467,
+ "step": 13
+ },
+ {
+ "epoch": 0.014003938607733425,
+ "grad_norm": 0.8307135105133057,
+ "learning_rate": 0.00019998923176460474,
+ "loss": 1.6896,
+ "step": 14
+ },
+ {
+ "epoch": 0.01500421993685724,
+ "grad_norm": 1.1531301736831665,
+ "learning_rate": 0.00019998763853811184,
+ "loss": 1.7549,
+ "step": 15
+ },
+ {
+ "epoch": 0.016004501265981057,
+ "grad_norm": 1.0071958303451538,
+ "learning_rate": 0.00019998593544328692,
+ "loss": 1.903,
+ "step": 16
+ },
+ {
+ "epoch": 0.017004782595104875,
+ "grad_norm": 0.9111937284469604,
+ "learning_rate": 0.00019998412248200138,
+ "loss": 1.8372,
+ "step": 17
+ },
+ {
+ "epoch": 0.01800506392422869,
+ "grad_norm": 0.9943836331367493,
+ "learning_rate": 0.00019998219965624734,
+ "loss": 1.7304,
+ "step": 18
+ },
+ {
+ "epoch": 0.019005345253352506,
+ "grad_norm": 0.8139007687568665,
+ "learning_rate": 0.0001999801669681376,
+ "loss": 1.6932,
+ "step": 19
+ },
+ {
+ "epoch": 0.020005626582476323,
+ "grad_norm": 0.7991273999214172,
+ "learning_rate": 0.00019997802441990573,
+ "loss": 1.9596,
+ "step": 20
+ },
+ {
+ "epoch": 0.021005907911600136,
+ "grad_norm": 0.832266628742218,
+ "learning_rate": 0.00019997577201390606,
+ "loss": 1.7116,
+ "step": 21
+ },
+ {
+ "epoch": 0.022006189240723954,
+ "grad_norm": 0.8465655446052551,
+ "learning_rate": 0.00019997340975261353,
+ "loss": 1.7711,
+ "step": 22
+ },
+ {
+ "epoch": 0.02300647056984777,
+ "grad_norm": 1.032426118850708,
+ "learning_rate": 0.00019997093763862383,
+ "loss": 1.6746,
+ "step": 23
+ },
+ {
+ "epoch": 0.024006751898971584,
+ "grad_norm": 1.0036743879318237,
+ "learning_rate": 0.0001999683556746534,
+ "loss": 1.7274,
+ "step": 24
+ },
+ {
+ "epoch": 0.0250070332280954,
+ "grad_norm": 0.9491412043571472,
+ "learning_rate": 0.0001999656638635393,
+ "loss": 2.0302,
+ "step": 25
+ },
+ {
+ "epoch": 0.02600731455721922,
+ "grad_norm": 0.9477822184562683,
+ "learning_rate": 0.0001999628622082394,
+ "loss": 1.6107,
+ "step": 26
+ },
+ {
+ "epoch": 0.027007595886343033,
+ "grad_norm": 1.0687041282653809,
+ "learning_rate": 0.0001999599507118322,
+ "loss": 1.8225,
+ "step": 27
+ },
+ {
+ "epoch": 0.02800787721546685,
+ "grad_norm": 1.6572712659835815,
+ "learning_rate": 0.00019995692937751683,
+ "loss": 1.896,
+ "step": 28
+ },
+ {
+ "epoch": 0.029008158544590667,
+ "grad_norm": 1.013258695602417,
+ "learning_rate": 0.0001999537982086133,
+ "loss": 1.7847,
+ "step": 29
+ },
+ {
+ "epoch": 0.03000843987371448,
+ "grad_norm": 0.7584932446479797,
+ "learning_rate": 0.00019995055720856218,
+ "loss": 1.5841,
+ "step": 30
+ },
+ {
+ "epoch": 0.031008721202838298,
+ "grad_norm": 1.1543537378311157,
+ "learning_rate": 0.00019994720638092468,
+ "loss": 1.8362,
+ "step": 31
+ },
+ {
+ "epoch": 0.032009002531962115,
+ "grad_norm": 0.8389608860015869,
+ "learning_rate": 0.00019994374572938277,
+ "loss": 1.7913,
+ "step": 32
+ },
+ {
+ "epoch": 0.03300928386108593,
+ "grad_norm": 0.7582125663757324,
+ "learning_rate": 0.00019994017525773913,
+ "loss": 1.5406,
+ "step": 33
+ },
+ {
+ "epoch": 0.03400956519020975,
+ "grad_norm": 0.7866935133934021,
+ "learning_rate": 0.00019993649496991705,
+ "loss": 1.5363,
+ "step": 34
+ },
+ {
+ "epoch": 0.03500984651933356,
+ "grad_norm": 0.8007768988609314,
+ "learning_rate": 0.00019993270486996046,
+ "loss": 1.7597,
+ "step": 35
+ },
+ {
+ "epoch": 0.03601012784845738,
+ "grad_norm": 0.8109031319618225,
+ "learning_rate": 0.000199928804962034,
+ "loss": 1.5554,
+ "step": 36
+ },
+ {
+ "epoch": 0.037010409177581194,
+ "grad_norm": 0.7722628116607666,
+ "learning_rate": 0.00019992479525042303,
+ "loss": 1.6437,
+ "step": 37
+ },
+ {
+ "epoch": 0.03801069050670501,
+ "grad_norm": 0.7336480021476746,
+ "learning_rate": 0.00019992067573953342,
+ "loss": 1.7276,
+ "step": 38
+ },
+ {
+ "epoch": 0.03901097183582883,
+ "grad_norm": 0.6940280795097351,
+ "learning_rate": 0.0001999164464338918,
+ "loss": 1.846,
+ "step": 39
+ },
+ {
+ "epoch": 0.040011253164952645,
+ "grad_norm": 0.7079702615737915,
+ "learning_rate": 0.0001999121073381454,
+ "loss": 1.7017,
+ "step": 40
+ },
+ {
+ "epoch": 0.041011534494076456,
+ "grad_norm": 0.7438498139381409,
+ "learning_rate": 0.0001999076584570621,
+ "loss": 1.665,
+ "step": 41
+ },
+ {
+ "epoch": 0.04201181582320027,
+ "grad_norm": 0.6951525211334229,
+ "learning_rate": 0.00019990309979553045,
+ "loss": 1.588,
+ "step": 42
+ },
+ {
+ "epoch": 0.04301209715232409,
+ "grad_norm": 0.9398604035377502,
+ "learning_rate": 0.00019989843135855958,
+ "loss": 1.6513,
+ "step": 43
+ },
+ {
+ "epoch": 0.04401237848144791,
+ "grad_norm": 0.7384347319602966,
+ "learning_rate": 0.00019989365315127922,
+ "loss": 1.5975,
+ "step": 44
+ },
+ {
+ "epoch": 0.045012659810571724,
+ "grad_norm": 0.9856846332550049,
+ "learning_rate": 0.0001998887651789398,
+ "loss": 1.644,
+ "step": 45
+ },
+ {
+ "epoch": 0.04601294113969554,
+ "grad_norm": 0.7322820425033569,
+ "learning_rate": 0.0001998837674469123,
+ "loss": 1.5207,
+ "step": 46
+ },
+ {
+ "epoch": 0.04701322246881936,
+ "grad_norm": 0.8695257902145386,
+ "learning_rate": 0.00019987865996068833,
+ "loss": 1.5572,
+ "step": 47
+ },
+ {
+ "epoch": 0.04801350379794317,
+ "grad_norm": 0.7231017351150513,
+ "learning_rate": 0.00019987344272588006,
+ "loss": 1.5841,
+ "step": 48
+ },
+ {
+ "epoch": 0.049013785127066986,
+ "grad_norm": 0.7147384285926819,
+ "learning_rate": 0.00019986811574822033,
+ "loss": 1.8628,
+ "step": 49
+ },
+ {
+ "epoch": 0.0500140664561908,
+ "grad_norm": 0.8631477355957031,
+ "learning_rate": 0.00019986267903356254,
+ "loss": 1.8487,
+ "step": 50
+ },
+ {
+ "epoch": 0.05101434778531462,
+ "grad_norm": 0.7995486855506897,
+ "learning_rate": 0.0001998571325878806,
+ "loss": 1.6491,
+ "step": 51
+ },
+ {
+ "epoch": 0.05201462911443844,
+ "grad_norm": 0.7828657031059265,
+ "learning_rate": 0.0001998514764172691,
+ "loss": 1.7496,
+ "step": 52
+ },
+ {
+ "epoch": 0.053014910443562255,
+ "grad_norm": 0.7789833545684814,
+ "learning_rate": 0.00019984571052794313,
+ "loss": 1.6628,
+ "step": 53
+ },
+ {
+ "epoch": 0.054015191772686065,
+ "grad_norm": 0.7077661752700806,
+ "learning_rate": 0.00019983983492623833,
+ "loss": 1.771,
+ "step": 54
+ },
+ {
+ "epoch": 0.05501547310180988,
+ "grad_norm": 0.7939582467079163,
+ "learning_rate": 0.00019983384961861096,
+ "loss": 1.707,
+ "step": 55
+ },
+ {
+ "epoch": 0.0560157544309337,
+ "grad_norm": 0.9438828229904175,
+ "learning_rate": 0.0001998277546116378,
+ "loss": 1.8334,
+ "step": 56
+ },
+ {
+ "epoch": 0.05701603576005752,
+ "grad_norm": 0.8028286695480347,
+ "learning_rate": 0.00019982154991201608,
+ "loss": 1.9117,
+ "step": 57
+ },
+ {
+ "epoch": 0.058016317089181334,
+ "grad_norm": 0.6563037037849426,
+ "learning_rate": 0.00019981523552656377,
+ "loss": 1.4767,
+ "step": 58
+ },
+ {
+ "epoch": 0.05901659841830515,
+ "grad_norm": 0.6600964665412903,
+ "learning_rate": 0.00019980881146221914,
+ "loss": 1.6656,
+ "step": 59
+ },
+ {
+ "epoch": 0.06001687974742896,
+ "grad_norm": 0.7966578602790833,
+ "learning_rate": 0.00019980227772604112,
+ "loss": 1.4844,
+ "step": 60
+ },
+ {
+ "epoch": 0.06101716107655278,
+ "grad_norm": 0.8019976615905762,
+ "learning_rate": 0.0001997956343252091,
+ "loss": 1.5682,
+ "step": 61
+ },
+ {
+ "epoch": 0.062017442405676595,
+ "grad_norm": 0.8935349583625793,
+ "learning_rate": 0.00019978888126702296,
+ "loss": 1.8131,
+ "step": 62
+ },
+ {
+ "epoch": 0.06301772373480041,
+ "grad_norm": 0.8085179924964905,
+ "learning_rate": 0.00019978201855890308,
+ "loss": 1.5602,
+ "step": 63
+ },
+ {
+ "epoch": 0.06401800506392423,
+ "grad_norm": 0.7631951570510864,
+ "learning_rate": 0.00019977504620839035,
+ "loss": 1.8008,
+ "step": 64
+ },
+ {
+ "epoch": 0.06501828639304805,
+ "grad_norm": 0.7315165996551514,
+ "learning_rate": 0.00019976796422314615,
+ "loss": 1.5735,
+ "step": 65
+ },
+ {
+ "epoch": 0.06601856772217186,
+ "grad_norm": 0.745726466178894,
+ "learning_rate": 0.00019976077261095226,
+ "loss": 1.5775,
+ "step": 66
+ },
+ {
+ "epoch": 0.06701884905129568,
+ "grad_norm": 0.9082249999046326,
+ "learning_rate": 0.00019975347137971098,
+ "loss": 1.7427,
+ "step": 67
+ },
+ {
+ "epoch": 0.0680191303804195,
+ "grad_norm": 0.6575669050216675,
+ "learning_rate": 0.00019974606053744503,
+ "loss": 1.5231,
+ "step": 68
+ },
+ {
+ "epoch": 0.06901941170954332,
+ "grad_norm": 0.7749233245849609,
+ "learning_rate": 0.00019973854009229763,
+ "loss": 1.5703,
+ "step": 69
+ },
+ {
+ "epoch": 0.07001969303866712,
+ "grad_norm": 0.7240824699401855,
+ "learning_rate": 0.00019973091005253232,
+ "loss": 1.5197,
+ "step": 70
+ },
+ {
+ "epoch": 0.07101997436779094,
+ "grad_norm": 0.8683856725692749,
+ "learning_rate": 0.0001997231704265332,
+ "loss": 1.6183,
+ "step": 71
+ },
+ {
+ "epoch": 0.07202025569691475,
+ "grad_norm": 0.6885640621185303,
+ "learning_rate": 0.00019971532122280464,
+ "loss": 1.6565,
+ "step": 72
+ },
+ {
+ "epoch": 0.07302053702603857,
+ "grad_norm": 0.6648329496383667,
+ "learning_rate": 0.0001997073624499716,
+ "loss": 1.5943,
+ "step": 73
+ },
+ {
+ "epoch": 0.07402081835516239,
+ "grad_norm": 0.8867416977882385,
+ "learning_rate": 0.0001996992941167792,
+ "loss": 1.7855,
+ "step": 74
+ },
+ {
+ "epoch": 0.0750210996842862,
+ "grad_norm": 0.7790491580963135,
+ "learning_rate": 0.00019969111623209323,
+ "loss": 1.6723,
+ "step": 75
+ },
+ {
+ "epoch": 0.07602138101341002,
+ "grad_norm": 0.7999201416969299,
+ "learning_rate": 0.00019968282880489957,
+ "loss": 1.5619,
+ "step": 76
+ },
+ {
+ "epoch": 0.07702166234253384,
+ "grad_norm": 0.6316407322883606,
+ "learning_rate": 0.00019967443184430467,
+ "loss": 1.6377,
+ "step": 77
+ },
+ {
+ "epoch": 0.07802194367165766,
+ "grad_norm": 0.7680445313453674,
+ "learning_rate": 0.0001996659253595353,
+ "loss": 1.5433,
+ "step": 78
+ },
+ {
+ "epoch": 0.07902222500078147,
+ "grad_norm": 0.7158446907997131,
+ "learning_rate": 0.0001996573093599385,
+ "loss": 1.5436,
+ "step": 79
+ },
+ {
+ "epoch": 0.08002250632990529,
+ "grad_norm": 0.7354825139045715,
+ "learning_rate": 0.00019964858385498172,
+ "loss": 1.6512,
+ "step": 80
+ },
+ {
+ "epoch": 0.08102278765902911,
+ "grad_norm": 0.7031210660934448,
+ "learning_rate": 0.00019963974885425266,
+ "loss": 1.6411,
+ "step": 81
+ },
+ {
+ "epoch": 0.08202306898815291,
+ "grad_norm": 0.8451045751571655,
+ "learning_rate": 0.00019963080436745945,
+ "loss": 1.6622,
+ "step": 82
+ },
+ {
+ "epoch": 0.08302335031727673,
+ "grad_norm": 0.8329521417617798,
+ "learning_rate": 0.00019962175040443044,
+ "loss": 1.9269,
+ "step": 83
+ },
+ {
+ "epoch": 0.08402363164640055,
+ "grad_norm": 0.6967645883560181,
+ "learning_rate": 0.0001996125869751143,
+ "loss": 1.7243,
+ "step": 84
+ },
+ {
+ "epoch": 0.08502391297552436,
+ "grad_norm": 0.8699042797088623,
+ "learning_rate": 0.00019960331408957997,
+ "loss": 1.7211,
+ "step": 85
+ },
+ {
+ "epoch": 0.08602419430464818,
+ "grad_norm": 0.6780512928962708,
+ "learning_rate": 0.00019959393175801671,
+ "loss": 1.6376,
+ "step": 86
+ },
+ {
+ "epoch": 0.087024475633772,
+ "grad_norm": 0.7213720679283142,
+ "learning_rate": 0.00019958443999073397,
+ "loss": 1.6048,
+ "step": 87
+ },
+ {
+ "epoch": 0.08802475696289581,
+ "grad_norm": 0.6077585816383362,
+ "learning_rate": 0.00019957483879816151,
+ "loss": 1.5231,
+ "step": 88
+ },
+ {
+ "epoch": 0.08902503829201963,
+ "grad_norm": 0.6854611039161682,
+ "learning_rate": 0.00019956512819084928,
+ "loss": 1.4726,
+ "step": 89
+ },
+ {
+ "epoch": 0.09002531962114345,
+ "grad_norm": 0.6969390511512756,
+ "learning_rate": 0.00019955530817946748,
+ "loss": 1.6435,
+ "step": 90
+ },
+ {
+ "epoch": 0.09102560095026727,
+ "grad_norm": 0.7178792953491211,
+ "learning_rate": 0.00019954537877480655,
+ "loss": 1.6967,
+ "step": 91
+ },
+ {
+ "epoch": 0.09202588227939108,
+ "grad_norm": 0.8248458504676819,
+ "learning_rate": 0.00019953533998777706,
+ "loss": 1.5884,
+ "step": 92
+ },
+ {
+ "epoch": 0.0930261636085149,
+ "grad_norm": 0.6472075581550598,
+ "learning_rate": 0.00019952519182940993,
+ "loss": 1.5074,
+ "step": 93
+ },
+ {
+ "epoch": 0.09402644493763872,
+ "grad_norm": 0.7548672556877136,
+ "learning_rate": 0.00019951493431085603,
+ "loss": 1.6934,
+ "step": 94
+ },
+ {
+ "epoch": 0.09502672626676252,
+ "grad_norm": 0.6680666208267212,
+ "learning_rate": 0.00019950456744338658,
+ "loss": 1.4014,
+ "step": 95
+ },
+ {
+ "epoch": 0.09602700759588634,
+ "grad_norm": 0.7270862460136414,
+ "learning_rate": 0.00019949409123839288,
+ "loss": 1.6991,
+ "step": 96
+ },
+ {
+ "epoch": 0.09702728892501016,
+ "grad_norm": 0.682833731174469,
+ "learning_rate": 0.00019948350570738642,
+ "loss": 1.4926,
+ "step": 97
+ },
+ {
+ "epoch": 0.09802757025413397,
+ "grad_norm": 0.6598315238952637,
+ "learning_rate": 0.0001994728108619987,
+ "loss": 1.6329,
+ "step": 98
+ },
+ {
+ "epoch": 0.09902785158325779,
+ "grad_norm": 0.830845832824707,
+ "learning_rate": 0.0001994620067139815,
+ "loss": 1.8517,
+ "step": 99
+ },
+ {
+ "epoch": 0.1000281329123816,
+ "grad_norm": 0.6964694857597351,
+ "learning_rate": 0.00019945109327520658,
+ "loss": 1.5459,
+ "step": 100
+ },
+ {
+ "epoch": 0.10102841424150542,
+ "grad_norm": 0.8640177249908447,
+ "learning_rate": 0.00019944007055766586,
+ "loss": 1.6638,
+ "step": 101
+ },
+ {
+ "epoch": 0.10202869557062924,
+ "grad_norm": 0.6485210657119751,
+ "learning_rate": 0.00019942893857347128,
+ "loss": 1.8025,
+ "step": 102
+ },
+ {
+ "epoch": 0.10302897689975306,
+ "grad_norm": 0.6746248006820679,
+ "learning_rate": 0.00019941769733485494,
+ "loss": 1.6954,
+ "step": 103
+ },
+ {
+ "epoch": 0.10402925822887688,
+ "grad_norm": 0.7386549115180969,
+ "learning_rate": 0.00019940634685416888,
+ "loss": 1.4547,
+ "step": 104
+ },
+ {
+ "epoch": 0.10502953955800069,
+ "grad_norm": 0.7518633008003235,
+ "learning_rate": 0.00019939488714388524,
+ "loss": 1.5098,
+ "step": 105
+ },
+ {
+ "epoch": 0.10602982088712451,
+ "grad_norm": 0.7350422739982605,
+ "learning_rate": 0.00019938331821659614,
+ "loss": 1.5452,
+ "step": 106
+ },
+ {
+ "epoch": 0.10703010221624833,
+ "grad_norm": 0.6544668674468994,
+ "learning_rate": 0.0001993716400850138,
+ "loss": 1.5106,
+ "step": 107
+ },
+ {
+ "epoch": 0.10803038354537213,
+ "grad_norm": 0.6108564138412476,
+ "learning_rate": 0.0001993598527619703,
+ "loss": 1.5818,
+ "step": 108
+ },
+ {
+ "epoch": 0.10903066487449595,
+ "grad_norm": 0.731071949005127,
+ "learning_rate": 0.00019934795626041783,
+ "loss": 1.4819,
+ "step": 109
+ },
+ {
+ "epoch": 0.11003094620361976,
+ "grad_norm": 0.5978986620903015,
+ "learning_rate": 0.0001993359505934285,
+ "loss": 1.5469,
+ "step": 110
+ },
+ {
+ "epoch": 0.11103122753274358,
+ "grad_norm": 0.7249881029129028,
+ "learning_rate": 0.00019932383577419432,
+ "loss": 1.7466,
+ "step": 111
+ },
+ {
+ "epoch": 0.1120315088618674,
+ "grad_norm": 0.6161806583404541,
+ "learning_rate": 0.0001993116118160273,
+ "loss": 1.3411,
+ "step": 112
+ },
+ {
+ "epoch": 0.11303179019099122,
+ "grad_norm": 0.6745229363441467,
+ "learning_rate": 0.00019929927873235938,
+ "loss": 1.5615,
+ "step": 113
+ },
+ {
+ "epoch": 0.11403207152011503,
+ "grad_norm": 0.6489872336387634,
+ "learning_rate": 0.00019928683653674237,
+ "loss": 1.6279,
+ "step": 114
+ },
+ {
+ "epoch": 0.11503235284923885,
+ "grad_norm": 0.7769975662231445,
+ "learning_rate": 0.00019927428524284805,
+ "loss": 1.6155,
+ "step": 115
+ },
+ {
+ "epoch": 0.11603263417836267,
+ "grad_norm": 0.734336793422699,
+ "learning_rate": 0.00019926162486446792,
+ "loss": 1.6837,
+ "step": 116
+ },
+ {
+ "epoch": 0.11703291550748648,
+ "grad_norm": 0.6966903805732727,
+ "learning_rate": 0.0001992488554155135,
+ "loss": 1.6659,
+ "step": 117
+ },
+ {
+ "epoch": 0.1180331968366103,
+ "grad_norm": 0.6714586615562439,
+ "learning_rate": 0.00019923597691001615,
+ "loss": 1.5161,
+ "step": 118
+ },
+ {
+ "epoch": 0.11903347816573412,
+ "grad_norm": 0.6390894651412964,
+ "learning_rate": 0.0001992229893621269,
+ "loss": 1.4561,
+ "step": 119
+ },
+ {
+ "epoch": 0.12003375949485792,
+ "grad_norm": 0.6481143832206726,
+ "learning_rate": 0.00019920989278611687,
+ "loss": 1.6331,
+ "step": 120
+ },
+ {
+ "epoch": 0.12103404082398174,
+ "grad_norm": 0.6819384694099426,
+ "learning_rate": 0.0001991966871963767,
+ "loss": 1.6508,
+ "step": 121
+ },
+ {
+ "epoch": 0.12203432215310556,
+ "grad_norm": 0.6839059591293335,
+ "learning_rate": 0.000199183372607417,
+ "loss": 1.6514,
+ "step": 122
+ },
+ {
+ "epoch": 0.12303460348222937,
+ "grad_norm": 0.6401050090789795,
+ "learning_rate": 0.0001991699490338681,
+ "loss": 1.8065,
+ "step": 123
+ },
+ {
+ "epoch": 0.12403488481135319,
+ "grad_norm": 0.6860588788986206,
+ "learning_rate": 0.00019915641649048005,
+ "loss": 1.7658,
+ "step": 124
+ },
+ {
+ "epoch": 0.12503516614047702,
+ "grad_norm": 0.6286434531211853,
+ "learning_rate": 0.0001991427749921227,
+ "loss": 1.7678,
+ "step": 125
+ },
+ {
+ "epoch": 0.12603544746960083,
+ "grad_norm": 0.6609922647476196,
+ "learning_rate": 0.00019912902455378556,
+ "loss": 1.4934,
+ "step": 126
+ },
+ {
+ "epoch": 0.12703572879872463,
+ "grad_norm": 0.7058399319648743,
+ "learning_rate": 0.00019911516519057788,
+ "loss": 1.6058,
+ "step": 127
+ },
+ {
+ "epoch": 0.12803601012784846,
+ "grad_norm": 0.6362051963806152,
+ "learning_rate": 0.00019910119691772863,
+ "loss": 1.502,
+ "step": 128
+ },
+ {
+ "epoch": 0.12903629145697226,
+ "grad_norm": 0.7493100762367249,
+ "learning_rate": 0.00019908711975058637,
+ "loss": 1.5287,
+ "step": 129
+ },
+ {
+ "epoch": 0.1300365727860961,
+ "grad_norm": 0.6492393612861633,
+ "learning_rate": 0.0001990729337046194,
+ "loss": 1.5716,
+ "step": 130
+ },
+ {
+ "epoch": 0.1310368541152199,
+ "grad_norm": 0.70331871509552,
+ "learning_rate": 0.0001990586387954156,
+ "loss": 1.5882,
+ "step": 131
+ },
+ {
+ "epoch": 0.13203713544434373,
+ "grad_norm": 0.7581572532653809,
+ "learning_rate": 0.00019904423503868247,
+ "loss": 1.7627,
+ "step": 132
+ },
+ {
+ "epoch": 0.13303741677346753,
+ "grad_norm": 0.7087228894233704,
+ "learning_rate": 0.00019902972245024715,
+ "loss": 1.6257,
+ "step": 133
+ },
+ {
+ "epoch": 0.13403769810259136,
+ "grad_norm": 0.7920627593994141,
+ "learning_rate": 0.00019901510104605637,
+ "loss": 1.572,
+ "step": 134
+ },
+ {
+ "epoch": 0.13503797943171517,
+ "grad_norm": 0.6869202256202698,
+ "learning_rate": 0.00019900037084217637,
+ "loss": 1.5478,
+ "step": 135
+ },
+ {
+ "epoch": 0.136038260760839,
+ "grad_norm": 0.6879409551620483,
+ "learning_rate": 0.00019898553185479303,
+ "loss": 1.3104,
+ "step": 136
+ },
+ {
+ "epoch": 0.1370385420899628,
+ "grad_norm": 0.6574143767356873,
+ "learning_rate": 0.00019897058410021167,
+ "loss": 1.7041,
+ "step": 137
+ },
+ {
+ "epoch": 0.13803882341908663,
+ "grad_norm": 0.7793259620666504,
+ "learning_rate": 0.00019895552759485722,
+ "loss": 1.5417,
+ "step": 138
+ },
+ {
+ "epoch": 0.13903910474821043,
+ "grad_norm": 0.6310438513755798,
+ "learning_rate": 0.00019894036235527395,
+ "loss": 1.4978,
+ "step": 139
+ },
+ {
+ "epoch": 0.14003938607733424,
+ "grad_norm": 0.6298012137413025,
+ "learning_rate": 0.00019892508839812584,
+ "loss": 1.5367,
+ "step": 140
+ },
+ {
+ "epoch": 0.14103966740645807,
+ "grad_norm": 0.5647856593132019,
+ "learning_rate": 0.00019890970574019617,
+ "loss": 1.537,
+ "step": 141
+ },
+ {
+ "epoch": 0.14203994873558187,
+ "grad_norm": 0.6491876244544983,
+ "learning_rate": 0.00019889421439838763,
+ "loss": 1.6992,
+ "step": 142
+ },
+ {
+ "epoch": 0.1430402300647057,
+ "grad_norm": 0.6574720144271851,
+ "learning_rate": 0.00019887861438972246,
+ "loss": 1.3837,
+ "step": 143
+ },
+ {
+ "epoch": 0.1440405113938295,
+ "grad_norm": 0.6267092227935791,
+ "learning_rate": 0.00019886290573134228,
+ "loss": 1.6307,
+ "step": 144
+ },
+ {
+ "epoch": 0.14504079272295334,
+ "grad_norm": 0.6785029172897339,
+ "learning_rate": 0.000198847088440508,
+ "loss": 1.574,
+ "step": 145
+ },
+ {
+ "epoch": 0.14604107405207714,
+ "grad_norm": 0.6218644380569458,
+ "learning_rate": 0.0001988311625346,
+ "loss": 1.4676,
+ "step": 146
+ },
+ {
+ "epoch": 0.14704135538120097,
+ "grad_norm": 0.6047986745834351,
+ "learning_rate": 0.00019881512803111796,
+ "loss": 1.4316,
+ "step": 147
+ },
+ {
+ "epoch": 0.14804163671032478,
+ "grad_norm": 0.7340937256813049,
+ "learning_rate": 0.00019879898494768093,
+ "loss": 1.5185,
+ "step": 148
+ },
+ {
+ "epoch": 0.1490419180394486,
+ "grad_norm": 0.5874620676040649,
+ "learning_rate": 0.00019878273330202717,
+ "loss": 1.5031,
+ "step": 149
+ },
+ {
+ "epoch": 0.1500421993685724,
+ "grad_norm": 0.6943556666374207,
+ "learning_rate": 0.00019876637311201433,
+ "loss": 1.7323,
+ "step": 150
+ },
+ {
+ "epoch": 0.15104248069769624,
+ "grad_norm": 0.6345832347869873,
+ "learning_rate": 0.00019874990439561934,
+ "loss": 1.4691,
+ "step": 151
+ },
+ {
+ "epoch": 0.15204276202682004,
+ "grad_norm": 0.7047753930091858,
+ "learning_rate": 0.0001987333271709383,
+ "loss": 1.5198,
+ "step": 152
+ },
+ {
+ "epoch": 0.15304304335594385,
+ "grad_norm": 0.6043322086334229,
+ "learning_rate": 0.00019871664145618657,
+ "loss": 1.5488,
+ "step": 153
+ },
+ {
+ "epoch": 0.15404332468506768,
+ "grad_norm": 0.5978446006774902,
+ "learning_rate": 0.00019869984726969878,
+ "loss": 1.4278,
+ "step": 154
+ },
+ {
+ "epoch": 0.15504360601419148,
+ "grad_norm": 0.6796436905860901,
+ "learning_rate": 0.00019868294462992866,
+ "loss": 1.5845,
+ "step": 155
+ },
+ {
+ "epoch": 0.1560438873433153,
+ "grad_norm": 0.7113372087478638,
+ "learning_rate": 0.00019866593355544922,
+ "loss": 1.7509,
+ "step": 156
+ },
+ {
+ "epoch": 0.15704416867243912,
+ "grad_norm": 0.5908107757568359,
+ "learning_rate": 0.00019864881406495246,
+ "loss": 1.5693,
+ "step": 157
+ },
+ {
+ "epoch": 0.15804445000156295,
+ "grad_norm": 0.7135252952575684,
+ "learning_rate": 0.00019863158617724967,
+ "loss": 1.6109,
+ "step": 158
+ },
+ {
+ "epoch": 0.15904473133068675,
+ "grad_norm": 0.5621710419654846,
+ "learning_rate": 0.00019861424991127115,
+ "loss": 1.5368,
+ "step": 159
+ },
+ {
+ "epoch": 0.16004501265981058,
+ "grad_norm": 0.6205443143844604,
+ "learning_rate": 0.00019859680528606637,
+ "loss": 1.5181,
+ "step": 160
+ },
+ {
+ "epoch": 0.16104529398893438,
+ "grad_norm": 0.6933260560035706,
+ "learning_rate": 0.00019857925232080373,
+ "loss": 1.4508,
+ "step": 161
+ },
+ {
+ "epoch": 0.16204557531805822,
+ "grad_norm": 0.6911661028862,
+ "learning_rate": 0.00019856159103477086,
+ "loss": 1.5423,
+ "step": 162
+ },
+ {
+ "epoch": 0.16304585664718202,
+ "grad_norm": 0.7684744000434875,
+ "learning_rate": 0.00019854382144737426,
+ "loss": 1.4097,
+ "step": 163
+ },
+ {
+ "epoch": 0.16404613797630582,
+ "grad_norm": 0.6657288074493408,
+ "learning_rate": 0.00019852594357813952,
+ "loss": 1.6145,
+ "step": 164
+ },
+ {
+ "epoch": 0.16504641930542965,
+ "grad_norm": 0.7030160427093506,
+ "learning_rate": 0.00019850795744671116,
+ "loss": 1.6551,
+ "step": 165
+ },
+ {
+ "epoch": 0.16604670063455346,
+ "grad_norm": 0.87894207239151,
+ "learning_rate": 0.0001984898630728527,
+ "loss": 1.6316,
+ "step": 166
+ },
+ {
+ "epoch": 0.1670469819636773,
+ "grad_norm": 0.6282681226730347,
+ "learning_rate": 0.0001984716604764466,
+ "loss": 1.451,
+ "step": 167
+ },
+ {
+ "epoch": 0.1680472632928011,
+ "grad_norm": 0.6729792952537537,
+ "learning_rate": 0.0001984533496774942,
+ "loss": 1.4381,
+ "step": 168
+ },
+ {
+ "epoch": 0.16904754462192492,
+ "grad_norm": 0.7300116419792175,
+ "learning_rate": 0.0001984349306961158,
+ "loss": 1.4244,
+ "step": 169
+ },
+ {
+ "epoch": 0.17004782595104873,
+ "grad_norm": 0.6853480935096741,
+ "learning_rate": 0.00019841640355255043,
+ "loss": 1.6174,
+ "step": 170
+ },
+ {
+ "epoch": 0.17104810728017256,
+ "grad_norm": 0.735612690448761,
+ "learning_rate": 0.00019839776826715614,
+ "loss": 1.5085,
+ "step": 171
+ },
+ {
+ "epoch": 0.17204838860929636,
+ "grad_norm": 0.6735563278198242,
+ "learning_rate": 0.00019837902486040978,
+ "loss": 1.507,
+ "step": 172
+ },
+ {
+ "epoch": 0.1730486699384202,
+ "grad_norm": 0.6617917418479919,
+ "learning_rate": 0.0001983601733529069,
+ "loss": 1.6774,
+ "step": 173
+ },
+ {
+ "epoch": 0.174048951267544,
+ "grad_norm": 0.7137823700904846,
+ "learning_rate": 0.00019834121376536187,
+ "loss": 1.4665,
+ "step": 174
+ },
+ {
+ "epoch": 0.17504923259666783,
+ "grad_norm": 0.6372626423835754,
+ "learning_rate": 0.00019832214611860793,
+ "loss": 1.3597,
+ "step": 175
+ },
+ {
+ "epoch": 0.17604951392579163,
+ "grad_norm": 0.7131632566452026,
+ "learning_rate": 0.00019830297043359692,
+ "loss": 1.4833,
+ "step": 176
+ },
+ {
+ "epoch": 0.17704979525491543,
+ "grad_norm": 0.7538559436798096,
+ "learning_rate": 0.00019828368673139947,
+ "loss": 1.4714,
+ "step": 177
+ },
+ {
+ "epoch": 0.17805007658403926,
+ "grad_norm": 0.5684806108474731,
+ "learning_rate": 0.0001982642950332049,
+ "loss": 1.5012,
+ "step": 178
+ },
+ {
+ "epoch": 0.17905035791316307,
+ "grad_norm": 0.621658444404602,
+ "learning_rate": 0.00019824479536032112,
+ "loss": 1.9119,
+ "step": 179
+ },
+ {
+ "epoch": 0.1800506392422869,
+ "grad_norm": 0.6564679741859436,
+ "learning_rate": 0.0001982251877341748,
+ "loss": 1.5131,
+ "step": 180
+ },
+ {
+ "epoch": 0.1810509205714107,
+ "grad_norm": 0.6546526551246643,
+ "learning_rate": 0.00019820547217631117,
+ "loss": 1.4493,
+ "step": 181
+ },
+ {
+ "epoch": 0.18205120190053453,
+ "grad_norm": 0.6504479050636292,
+ "learning_rate": 0.00019818564870839405,
+ "loss": 1.6131,
+ "step": 182
+ },
+ {
+ "epoch": 0.18305148322965833,
+ "grad_norm": 0.6269803047180176,
+ "learning_rate": 0.00019816571735220583,
+ "loss": 1.5936,
+ "step": 183
+ },
+ {
+ "epoch": 0.18405176455878217,
+ "grad_norm": 0.6303942799568176,
+ "learning_rate": 0.00019814567812964748,
+ "loss": 1.6948,
+ "step": 184
+ },
+ {
+ "epoch": 0.18505204588790597,
+ "grad_norm": 0.6562885046005249,
+ "learning_rate": 0.00019812553106273847,
+ "loss": 1.5542,
+ "step": 185
+ },
+ {
+ "epoch": 0.1860523272170298,
+ "grad_norm": 0.5844212174415588,
+ "learning_rate": 0.00019810527617361681,
+ "loss": 1.539,
+ "step": 186
+ },
+ {
+ "epoch": 0.1870526085461536,
+ "grad_norm": 0.6402295231819153,
+ "learning_rate": 0.00019808491348453894,
+ "loss": 1.4748,
+ "step": 187
+ },
+ {
+ "epoch": 0.18805288987527743,
+ "grad_norm": 0.6579477190971375,
+ "learning_rate": 0.00019806444301787978,
+ "loss": 1.5114,
+ "step": 188
+ },
+ {
+ "epoch": 0.18905317120440124,
+ "grad_norm": 0.6511597037315369,
+ "learning_rate": 0.0001980438647961327,
+ "loss": 1.4678,
+ "step": 189
+ },
+ {
+ "epoch": 0.19005345253352504,
+ "grad_norm": 0.6911427974700928,
+ "learning_rate": 0.00019802317884190935,
+ "loss": 1.6876,
+ "step": 190
+ },
+ {
+ "epoch": 0.19105373386264887,
+ "grad_norm": 0.6146433353424072,
+ "learning_rate": 0.00019800238517793996,
+ "loss": 1.5986,
+ "step": 191
+ },
+ {
+ "epoch": 0.19205401519177268,
+ "grad_norm": 0.6126302480697632,
+ "learning_rate": 0.00019798148382707296,
+ "loss": 1.571,
+ "step": 192
+ },
+ {
+ "epoch": 0.1930542965208965,
+ "grad_norm": 0.5751072764396667,
+ "learning_rate": 0.00019796047481227515,
+ "loss": 1.4921,
+ "step": 193
+ },
+ {
+ "epoch": 0.1940545778500203,
+ "grad_norm": 0.6484839916229248,
+ "learning_rate": 0.00019793935815663163,
+ "loss": 1.7495,
+ "step": 194
+ },
+ {
+ "epoch": 0.19505485917914414,
+ "grad_norm": 0.6875973343849182,
+ "learning_rate": 0.00019791813388334581,
+ "loss": 1.5782,
+ "step": 195
+ },
+ {
+ "epoch": 0.19605514050826794,
+ "grad_norm": 0.8130943179130554,
+ "learning_rate": 0.00019789680201573933,
+ "loss": 1.4964,
+ "step": 196
+ },
+ {
+ "epoch": 0.19705542183739178,
+ "grad_norm": 0.6734403371810913,
+ "learning_rate": 0.00019787536257725202,
+ "loss": 1.4787,
+ "step": 197
+ },
+ {
+ "epoch": 0.19805570316651558,
+ "grad_norm": 0.6480582356452942,
+ "learning_rate": 0.00019785381559144196,
+ "loss": 1.5629,
+ "step": 198
+ },
+ {
+ "epoch": 0.1990559844956394,
+ "grad_norm": 0.6554624438285828,
+ "learning_rate": 0.00019783216108198542,
+ "loss": 1.5806,
+ "step": 199
+ },
+ {
+ "epoch": 0.2000562658247632,
+ "grad_norm": 0.705443263053894,
+ "learning_rate": 0.00019781039907267677,
+ "loss": 1.8372,
+ "step": 200
+ },
+ {
+ "epoch": 0.20105654715388704,
+ "grad_norm": 0.706923246383667,
+ "learning_rate": 0.00019778852958742853,
+ "loss": 1.6405,
+ "step": 201
+ },
+ {
+ "epoch": 0.20205682848301085,
+ "grad_norm": 0.7062544822692871,
+ "learning_rate": 0.00019776655265027127,
+ "loss": 1.6,
+ "step": 202
+ },
+ {
+ "epoch": 0.20305710981213465,
+ "grad_norm": 0.7227569222450256,
+ "learning_rate": 0.00019774446828535371,
+ "loss": 1.5172,
+ "step": 203
+ },
+ {
+ "epoch": 0.20405739114125848,
+ "grad_norm": 0.6762563586235046,
+ "learning_rate": 0.00019772227651694256,
+ "loss": 1.6753,
+ "step": 204
+ },
+ {
+ "epoch": 0.20505767247038229,
+ "grad_norm": 0.6048421859741211,
+ "learning_rate": 0.00019769997736942258,
+ "loss": 1.4827,
+ "step": 205
+ },
+ {
+ "epoch": 0.20605795379950612,
+ "grad_norm": 0.6002956032752991,
+ "learning_rate": 0.00019767757086729647,
+ "loss": 1.5438,
+ "step": 206
+ },
+ {
+ "epoch": 0.20705823512862992,
+ "grad_norm": 0.7948954701423645,
+ "learning_rate": 0.00019765505703518496,
+ "loss": 1.4988,
+ "step": 207
+ },
+ {
+ "epoch": 0.20805851645775375,
+ "grad_norm": 0.6495680809020996,
+ "learning_rate": 0.00019763243589782662,
+ "loss": 1.5738,
+ "step": 208
+ },
+ {
+ "epoch": 0.20905879778687755,
+ "grad_norm": 0.6413107514381409,
+ "learning_rate": 0.00019760970748007803,
+ "loss": 1.3794,
+ "step": 209
+ },
+ {
+ "epoch": 0.21005907911600138,
+ "grad_norm": 0.5999665260314941,
+ "learning_rate": 0.0001975868718069136,
+ "loss": 1.4313,
+ "step": 210
+ },
+ {
+ "epoch": 0.2110593604451252,
+ "grad_norm": 0.6355773210525513,
+ "learning_rate": 0.00019756392890342563,
+ "loss": 1.5107,
+ "step": 211
+ },
+ {
+ "epoch": 0.21205964177424902,
+ "grad_norm": 0.6068251729011536,
+ "learning_rate": 0.00019754087879482422,
+ "loss": 1.536,
+ "step": 212
+ },
+ {
+ "epoch": 0.21305992310337282,
+ "grad_norm": 0.5568909049034119,
+ "learning_rate": 0.00019751772150643722,
+ "loss": 1.5372,
+ "step": 213
+ },
+ {
+ "epoch": 0.21406020443249665,
+ "grad_norm": 0.5771281719207764,
+ "learning_rate": 0.00019749445706371038,
+ "loss": 1.487,
+ "step": 214
+ },
+ {
+ "epoch": 0.21506048576162046,
+ "grad_norm": 0.6146671772003174,
+ "learning_rate": 0.00019747108549220702,
+ "loss": 1.4585,
+ "step": 215
+ },
+ {
+ "epoch": 0.21606076709074426,
+ "grad_norm": 0.5595754981040955,
+ "learning_rate": 0.00019744760681760832,
+ "loss": 1.4224,
+ "step": 216
+ },
+ {
+ "epoch": 0.2170610484198681,
+ "grad_norm": 0.5873929858207703,
+ "learning_rate": 0.00019742402106571314,
+ "loss": 1.4581,
+ "step": 217
+ },
+ {
+ "epoch": 0.2180613297489919,
+ "grad_norm": 0.5725668668746948,
+ "learning_rate": 0.00019740032826243788,
+ "loss": 1.4393,
+ "step": 218
+ },
+ {
+ "epoch": 0.21906161107811573,
+ "grad_norm": 0.6452648043632507,
+ "learning_rate": 0.0001973765284338167,
+ "loss": 1.6048,
+ "step": 219
+ },
+ {
+ "epoch": 0.22006189240723953,
+ "grad_norm": 0.6166092753410339,
+ "learning_rate": 0.00019735262160600127,
+ "loss": 1.4976,
+ "step": 220
+ },
+ {
+ "epoch": 0.22106217373636336,
+ "grad_norm": 0.7053269147872925,
+ "learning_rate": 0.00019732860780526088,
+ "loss": 1.6882,
+ "step": 221
+ },
+ {
+ "epoch": 0.22206245506548716,
+ "grad_norm": 0.7072796821594238,
+ "learning_rate": 0.00019730448705798239,
+ "loss": 1.5441,
+ "step": 222
+ },
+ {
+ "epoch": 0.223062736394611,
+ "grad_norm": 0.6704496145248413,
+ "learning_rate": 0.00019728025939067008,
+ "loss": 1.3791,
+ "step": 223
+ },
+ {
+ "epoch": 0.2240630177237348,
+ "grad_norm": 0.6141743659973145,
+ "learning_rate": 0.00019725592482994583,
+ "loss": 1.5831,
+ "step": 224
+ },
+ {
+ "epoch": 0.22506329905285863,
+ "grad_norm": 0.6235673427581787,
+ "learning_rate": 0.00019723148340254892,
+ "loss": 1.6103,
+ "step": 225
+ },
+ {
+ "epoch": 0.22606358038198243,
+ "grad_norm": 0.6383673548698425,
+ "learning_rate": 0.00019720693513533598,
+ "loss": 1.6284,
+ "step": 226
+ },
+ {
+ "epoch": 0.22706386171110624,
+ "grad_norm": 0.7666104435920715,
+ "learning_rate": 0.00019718228005528122,
+ "loss": 1.702,
+ "step": 227
+ },
+ {
+ "epoch": 0.22806414304023007,
+ "grad_norm": 0.6431383490562439,
+ "learning_rate": 0.00019715751818947603,
+ "loss": 1.4571,
+ "step": 228
+ },
+ {
+ "epoch": 0.22906442436935387,
+ "grad_norm": 0.6177626252174377,
+ "learning_rate": 0.0001971326495651293,
+ "loss": 1.4326,
+ "step": 229
+ },
+ {
+ "epoch": 0.2300647056984777,
+ "grad_norm": 0.7352898120880127,
+ "learning_rate": 0.00019710767420956705,
+ "loss": 1.7427,
+ "step": 230
+ },
+ {
+ "epoch": 0.2310649870276015,
+ "grad_norm": 0.6259469389915466,
+ "learning_rate": 0.0001970825921502328,
+ "loss": 1.634,
+ "step": 231
+ },
+ {
+ "epoch": 0.23206526835672533,
+ "grad_norm": 0.6699635982513428,
+ "learning_rate": 0.0001970574034146871,
+ "loss": 1.4705,
+ "step": 232
+ },
+ {
+ "epoch": 0.23306554968584914,
+ "grad_norm": 0.5577033162117004,
+ "learning_rate": 0.00019703210803060782,
+ "loss": 1.5438,
+ "step": 233
+ },
+ {
+ "epoch": 0.23406583101497297,
+ "grad_norm": 0.6063429117202759,
+ "learning_rate": 0.00019700670602579008,
+ "loss": 1.555,
+ "step": 234
+ },
+ {
+ "epoch": 0.23506611234409677,
+ "grad_norm": 0.6069104671478271,
+ "learning_rate": 0.00019698119742814606,
+ "loss": 1.5036,
+ "step": 235
+ },
+ {
+ "epoch": 0.2360663936732206,
+ "grad_norm": 0.6158379316329956,
+ "learning_rate": 0.00019695558226570507,
+ "loss": 1.3741,
+ "step": 236
+ },
+ {
+ "epoch": 0.2370666750023444,
+ "grad_norm": 0.6366294622421265,
+ "learning_rate": 0.00019692986056661356,
+ "loss": 1.4467,
+ "step": 237
+ },
+ {
+ "epoch": 0.23806695633146824,
+ "grad_norm": 0.6726595163345337,
+ "learning_rate": 0.00019690403235913504,
+ "loss": 1.3861,
+ "step": 238
+ },
+ {
+ "epoch": 0.23906723766059204,
+ "grad_norm": 0.6546512842178345,
+ "learning_rate": 0.00019687809767165,
+ "loss": 1.6886,
+ "step": 239
+ },
+ {
+ "epoch": 0.24006751898971584,
+ "grad_norm": 0.6623121500015259,
+ "learning_rate": 0.000196852056532656,
+ "loss": 1.5925,
+ "step": 240
+ },
+ {
+ "epoch": 0.24106780031883968,
+ "grad_norm": 0.6577529311180115,
+ "learning_rate": 0.00019682590897076752,
+ "loss": 1.4509,
+ "step": 241
+ },
+ {
+ "epoch": 0.24206808164796348,
+ "grad_norm": 0.5586327314376831,
+ "learning_rate": 0.00019679965501471608,
+ "loss": 1.6346,
+ "step": 242
+ },
+ {
+ "epoch": 0.2430683629770873,
+ "grad_norm": 0.6459937691688538,
+ "learning_rate": 0.0001967732946933499,
+ "loss": 1.4129,
+ "step": 243
+ },
+ {
+ "epoch": 0.2440686443062111,
+ "grad_norm": 0.778732180595398,
+ "learning_rate": 0.00019674682803563428,
+ "loss": 1.5129,
+ "step": 244
+ },
+ {
+ "epoch": 0.24506892563533494,
+ "grad_norm": 0.7264451384544373,
+ "learning_rate": 0.00019672025507065131,
+ "loss": 1.4483,
+ "step": 245
+ },
+ {
+ "epoch": 0.24606920696445875,
+ "grad_norm": 0.616084635257721,
+ "learning_rate": 0.00019669357582759983,
+ "loss": 1.5947,
+ "step": 246
+ },
+ {
+ "epoch": 0.24706948829358258,
+ "grad_norm": 0.5911642909049988,
+ "learning_rate": 0.00019666679033579552,
+ "loss": 1.6407,
+ "step": 247
+ },
+ {
+ "epoch": 0.24806976962270638,
+ "grad_norm": 0.6102796792984009,
+ "learning_rate": 0.00019663989862467082,
+ "loss": 1.5251,
+ "step": 248
+ },
+ {
+ "epoch": 0.2490700509518302,
+ "grad_norm": 0.5973434448242188,
+ "learning_rate": 0.00019661290072377482,
+ "loss": 1.3969,
+ "step": 249
+ },
+ {
+ "epoch": 0.25007033228095404,
+ "grad_norm": 0.8515523076057434,
+ "learning_rate": 0.00019658579666277334,
+ "loss": 1.5687,
+ "step": 250
+ },
+ {
+ "epoch": 0.2510706136100778,
+ "grad_norm": 0.5003417134284973,
+ "learning_rate": 0.0001965585864714488,
+ "loss": 1.4102,
+ "step": 251
+ },
+ {
+ "epoch": 0.25207089493920165,
+ "grad_norm": 0.5215190052986145,
+ "learning_rate": 0.00019653127017970034,
+ "loss": 1.2471,
+ "step": 252
+ },
+ {
+ "epoch": 0.2530711762683255,
+ "grad_norm": 0.6491619348526001,
+ "learning_rate": 0.0001965038478175436,
+ "loss": 1.6969,
+ "step": 253
+ },
+ {
+ "epoch": 0.25407145759744926,
+ "grad_norm": 0.6176133155822754,
+ "learning_rate": 0.00019647631941511082,
+ "loss": 1.5351,
+ "step": 254
+ },
+ {
+ "epoch": 0.2550717389265731,
+ "grad_norm": 0.6913408041000366,
+ "learning_rate": 0.0001964486850026507,
+ "loss": 1.4309,
+ "step": 255
+ },
+ {
+ "epoch": 0.2560720202556969,
+ "grad_norm": 0.5875718593597412,
+ "learning_rate": 0.00019642094461052852,
+ "loss": 1.4679,
+ "step": 256
+ },
+ {
+ "epoch": 0.25707230158482075,
+ "grad_norm": 0.6682264804840088,
+ "learning_rate": 0.00019639309826922585,
+ "loss": 1.5393,
+ "step": 257
+ },
+ {
+ "epoch": 0.2580725829139445,
+ "grad_norm": 0.7241432666778564,
+ "learning_rate": 0.0001963651460093409,
+ "loss": 1.4998,
+ "step": 258
+ },
+ {
+ "epoch": 0.25907286424306836,
+ "grad_norm": 0.5210353136062622,
+ "learning_rate": 0.00019633708786158806,
+ "loss": 1.3837,
+ "step": 259
+ },
+ {
+ "epoch": 0.2600731455721922,
+ "grad_norm": 0.584020733833313,
+ "learning_rate": 0.00019630892385679818,
+ "loss": 1.4961,
+ "step": 260
+ },
+ {
+ "epoch": 0.261073426901316,
+ "grad_norm": 0.6708115935325623,
+ "learning_rate": 0.00019628065402591845,
+ "loss": 1.5277,
+ "step": 261
+ },
+ {
+ "epoch": 0.2620737082304398,
+ "grad_norm": 0.5480003952980042,
+ "learning_rate": 0.00019625227840001225,
+ "loss": 1.556,
+ "step": 262
+ },
+ {
+ "epoch": 0.2630739895595636,
+ "grad_norm": 0.595191478729248,
+ "learning_rate": 0.0001962237970102593,
+ "loss": 1.3514,
+ "step": 263
+ },
+ {
+ "epoch": 0.26407427088868746,
+ "grad_norm": 0.7332099080085754,
+ "learning_rate": 0.0001961952098879555,
+ "loss": 1.5394,
+ "step": 264
+ },
+ {
+ "epoch": 0.26507455221781123,
+ "grad_norm": 0.596319317817688,
+ "learning_rate": 0.00019616651706451287,
+ "loss": 1.3828,
+ "step": 265
+ },
+ {
+ "epoch": 0.26607483354693506,
+ "grad_norm": 0.5998026132583618,
+ "learning_rate": 0.0001961377185714597,
+ "loss": 1.4479,
+ "step": 266
+ },
+ {
+ "epoch": 0.2670751148760589,
+ "grad_norm": 0.6220220923423767,
+ "learning_rate": 0.0001961088144404403,
+ "loss": 1.5121,
+ "step": 267
+ },
+ {
+ "epoch": 0.2680753962051827,
+ "grad_norm": 0.5865943431854248,
+ "learning_rate": 0.00019607980470321505,
+ "loss": 1.6747,
+ "step": 268
+ },
+ {
+ "epoch": 0.2690756775343065,
+ "grad_norm": 0.5790852904319763,
+ "learning_rate": 0.00019605068939166045,
+ "loss": 1.3798,
+ "step": 269
+ },
+ {
+ "epoch": 0.27007595886343033,
+ "grad_norm": 0.6157498955726624,
+ "learning_rate": 0.00019602146853776894,
+ "loss": 1.6799,
+ "step": 270
+ },
+ {
+ "epoch": 0.27107624019255416,
+ "grad_norm": 0.6214422583580017,
+ "learning_rate": 0.000195992142173649,
+ "loss": 1.4782,
+ "step": 271
+ },
+ {
+ "epoch": 0.272076521521678,
+ "grad_norm": 0.6460129618644714,
+ "learning_rate": 0.0001959627103315249,
+ "loss": 1.4874,
+ "step": 272
+ },
+ {
+ "epoch": 0.27307680285080177,
+ "grad_norm": 0.5928930640220642,
+ "learning_rate": 0.00019593317304373705,
+ "loss": 1.4557,
+ "step": 273
+ },
+ {
+ "epoch": 0.2740770841799256,
+ "grad_norm": 0.5123687982559204,
+ "learning_rate": 0.00019590353034274144,
+ "loss": 1.445,
+ "step": 274
+ },
+ {
+ "epoch": 0.27507736550904943,
+ "grad_norm": 0.607455313205719,
+ "learning_rate": 0.00019587378226111014,
+ "loss": 1.4468,
+ "step": 275
+ },
+ {
+ "epoch": 0.27607764683817326,
+ "grad_norm": 0.6108120083808899,
+ "learning_rate": 0.00019584392883153088,
+ "loss": 1.3834,
+ "step": 276
+ },
+ {
+ "epoch": 0.27707792816729704,
+ "grad_norm": 0.680404543876648,
+ "learning_rate": 0.00019581397008680717,
+ "loss": 1.5094,
+ "step": 277
+ },
+ {
+ "epoch": 0.27807820949642087,
+ "grad_norm": 0.6419563889503479,
+ "learning_rate": 0.00019578390605985826,
+ "loss": 1.6933,
+ "step": 278
+ },
+ {
+ "epoch": 0.2790784908255447,
+ "grad_norm": 0.5788853764533997,
+ "learning_rate": 0.00019575373678371909,
+ "loss": 1.4754,
+ "step": 279
+ },
+ {
+ "epoch": 0.2800787721546685,
+ "grad_norm": 0.5943770408630371,
+ "learning_rate": 0.00019572346229154025,
+ "loss": 1.2949,
+ "step": 280
+ },
+ {
+ "epoch": 0.2810790534837923,
+ "grad_norm": 0.5997135043144226,
+ "learning_rate": 0.00019569308261658787,
+ "loss": 1.5365,
+ "step": 281
+ },
+ {
+ "epoch": 0.28207933481291614,
+ "grad_norm": 0.692401647567749,
+ "learning_rate": 0.00019566259779224378,
+ "loss": 1.4946,
+ "step": 282
+ },
+ {
+ "epoch": 0.28307961614203997,
+ "grad_norm": 0.5856708884239197,
+ "learning_rate": 0.00019563200785200526,
+ "loss": 1.426,
+ "step": 283
+ },
+ {
+ "epoch": 0.28407989747116374,
+ "grad_norm": 1.2516822814941406,
+ "learning_rate": 0.00019560131282948516,
+ "loss": 1.5119,
+ "step": 284
+ },
+ {
+ "epoch": 0.2850801788002876,
+ "grad_norm": 0.6360501050949097,
+ "learning_rate": 0.0001955705127584117,
+ "loss": 1.3916,
+ "step": 285
+ },
+ {
+ "epoch": 0.2860804601294114,
+ "grad_norm": 0.6822036504745483,
+ "learning_rate": 0.00019553960767262863,
+ "loss": 1.5565,
+ "step": 286
+ },
+ {
+ "epoch": 0.28708074145853524,
+ "grad_norm": 0.6973714828491211,
+ "learning_rate": 0.00019550859760609503,
+ "loss": 1.5559,
+ "step": 287
+ },
+ {
+ "epoch": 0.288081022787659,
+ "grad_norm": 0.6595618724822998,
+ "learning_rate": 0.00019547748259288536,
+ "loss": 1.5824,
+ "step": 288
+ },
+ {
+ "epoch": 0.28908130411678284,
+ "grad_norm": 0.5625808238983154,
+ "learning_rate": 0.0001954462626671894,
+ "loss": 1.2669,
+ "step": 289
+ },
+ {
+ "epoch": 0.2900815854459067,
+ "grad_norm": 0.6318663358688354,
+ "learning_rate": 0.0001954149378633122,
+ "loss": 1.3896,
+ "step": 290
+ },
+ {
+ "epoch": 0.29108186677503045,
+ "grad_norm": 0.6655906438827515,
+ "learning_rate": 0.00019538350821567404,
+ "loss": 1.3889,
+ "step": 291
+ },
+ {
+ "epoch": 0.2920821481041543,
+ "grad_norm": 0.5947337746620178,
+ "learning_rate": 0.00019535197375881045,
+ "loss": 1.6112,
+ "step": 292
+ },
+ {
+ "epoch": 0.2930824294332781,
+ "grad_norm": 0.6139295101165771,
+ "learning_rate": 0.00019532033452737205,
+ "loss": 1.5185,
+ "step": 293
+ },
+ {
+ "epoch": 0.29408271076240194,
+ "grad_norm": 0.579953670501709,
+ "learning_rate": 0.00019528859055612468,
+ "loss": 1.3874,
+ "step": 294
+ },
+ {
+ "epoch": 0.2950829920915257,
+ "grad_norm": 0.6101506352424622,
+ "learning_rate": 0.0001952567418799492,
+ "loss": 1.5965,
+ "step": 295
+ },
+ {
+ "epoch": 0.29608327342064955,
+ "grad_norm": 0.6393965482711792,
+ "learning_rate": 0.00019522478853384155,
+ "loss": 1.4124,
+ "step": 296
+ },
+ {
+ "epoch": 0.2970835547497734,
+ "grad_norm": 0.6147856712341309,
+ "learning_rate": 0.00019519273055291266,
+ "loss": 1.3776,
+ "step": 297
+ },
+ {
+ "epoch": 0.2980838360788972,
+ "grad_norm": 0.6056416630744934,
+ "learning_rate": 0.00019516056797238846,
+ "loss": 1.4453,
+ "step": 298
+ },
+ {
+ "epoch": 0.299084117408021,
+ "grad_norm": 0.6705831289291382,
+ "learning_rate": 0.00019512830082760987,
+ "loss": 1.3248,
+ "step": 299
+ },
+ {
+ "epoch": 0.3000843987371448,
+ "grad_norm": 0.6664314866065979,
+ "learning_rate": 0.00019509592915403255,
+ "loss": 1.5865,
+ "step": 300
+ },
+ {
+ "epoch": 0.30108468006626865,
+ "grad_norm": 0.5325604677200317,
+ "learning_rate": 0.00019506345298722717,
+ "loss": 1.0646,
+ "step": 301
+ },
+ {
+ "epoch": 0.3020849613953925,
+ "grad_norm": 0.589242160320282,
+ "learning_rate": 0.00019503087236287913,
+ "loss": 1.2297,
+ "step": 302
+ },
+ {
+ "epoch": 0.30308524272451626,
+ "grad_norm": 0.5677699446678162,
+ "learning_rate": 0.00019499818731678873,
+ "loss": 1.3961,
+ "step": 303
+ },
+ {
+ "epoch": 0.3040855240536401,
+ "grad_norm": 0.5676394701004028,
+ "learning_rate": 0.00019496539788487082,
+ "loss": 1.3276,
+ "step": 304
+ },
+ {
+ "epoch": 0.3050858053827639,
+ "grad_norm": 0.7280861139297485,
+ "learning_rate": 0.0001949325041031551,
+ "loss": 1.6731,
+ "step": 305
+ },
+ {
+ "epoch": 0.3060860867118877,
+ "grad_norm": 0.690636396408081,
+ "learning_rate": 0.0001948995060077859,
+ "loss": 1.5443,
+ "step": 306
+ },
+ {
+ "epoch": 0.3070863680410115,
+ "grad_norm": 0.611426055431366,
+ "learning_rate": 0.0001948664036350221,
+ "loss": 1.5827,
+ "step": 307
+ },
+ {
+ "epoch": 0.30808664937013536,
+ "grad_norm": 0.7112497091293335,
+ "learning_rate": 0.00019483319702123732,
+ "loss": 1.5401,
+ "step": 308
+ },
+ {
+ "epoch": 0.3090869306992592,
+ "grad_norm": 0.6598275303840637,
+ "learning_rate": 0.00019479988620291956,
+ "loss": 1.6432,
+ "step": 309
+ },
+ {
+ "epoch": 0.31008721202838296,
+ "grad_norm": 0.5019932985305786,
+ "learning_rate": 0.00019476647121667137,
+ "loss": 1.2561,
+ "step": 310
+ },
+ {
+ "epoch": 0.3110874933575068,
+ "grad_norm": 0.7777897715568542,
+ "learning_rate": 0.00019473295209920983,
+ "loss": 1.6118,
+ "step": 311
+ },
+ {
+ "epoch": 0.3120877746866306,
+ "grad_norm": 0.6028640866279602,
+ "learning_rate": 0.00019469932888736632,
+ "loss": 1.4682,
+ "step": 312
+ },
+ {
+ "epoch": 0.31308805601575446,
+ "grad_norm": 0.554381251335144,
+ "learning_rate": 0.00019466560161808674,
+ "loss": 1.4179,
+ "step": 313
+ },
+ {
+ "epoch": 0.31408833734487823,
+ "grad_norm": 0.6212736368179321,
+ "learning_rate": 0.00019463177032843124,
+ "loss": 1.4327,
+ "step": 314
+ },
+ {
+ "epoch": 0.31508861867400206,
+ "grad_norm": 0.6829814910888672,
+ "learning_rate": 0.00019459783505557424,
+ "loss": 1.4455,
+ "step": 315
+ },
+ {
+ "epoch": 0.3160889000031259,
+ "grad_norm": 0.5808065533638,
+ "learning_rate": 0.00019456379583680452,
+ "loss": 1.3583,
+ "step": 316
+ },
+ {
+ "epoch": 0.31708918133224967,
+ "grad_norm": 0.6354159712791443,
+ "learning_rate": 0.000194529652709525,
+ "loss": 1.6916,
+ "step": 317
+ },
+ {
+ "epoch": 0.3180894626613735,
+ "grad_norm": 0.6299159526824951,
+ "learning_rate": 0.00019449540571125286,
+ "loss": 1.47,
+ "step": 318
+ },
+ {
+ "epoch": 0.31908974399049733,
+ "grad_norm": 0.6222877502441406,
+ "learning_rate": 0.00019446105487961926,
+ "loss": 1.4137,
+ "step": 319
+ },
+ {
+ "epoch": 0.32009002531962116,
+ "grad_norm": 0.5995916724205017,
+ "learning_rate": 0.0001944266002523696,
+ "loss": 1.3679,
+ "step": 320
+ },
+ {
+ "epoch": 0.32109030664874494,
+ "grad_norm": 0.599814236164093,
+ "learning_rate": 0.0001943920418673633,
+ "loss": 1.4075,
+ "step": 321
+ },
+ {
+ "epoch": 0.32209058797786877,
+ "grad_norm": 0.5409269332885742,
+ "learning_rate": 0.00019435737976257377,
+ "loss": 1.4289,
+ "step": 322
+ },
+ {
+ "epoch": 0.3230908693069926,
+ "grad_norm": 0.5298951864242554,
+ "learning_rate": 0.00019432261397608834,
+ "loss": 1.2834,
+ "step": 323
+ },
+ {
+ "epoch": 0.32409115063611643,
+ "grad_norm": 0.7196112871170044,
+ "learning_rate": 0.00019428774454610843,
+ "loss": 1.4845,
+ "step": 324
+ },
+ {
+ "epoch": 0.3250914319652402,
+ "grad_norm": 0.5605450868606567,
+ "learning_rate": 0.00019425277151094913,
+ "loss": 1.4575,
+ "step": 325
+ },
+ {
+ "epoch": 0.32609171329436404,
+ "grad_norm": 0.573080837726593,
+ "learning_rate": 0.00019421769490903957,
+ "loss": 1.5757,
+ "step": 326
+ },
+ {
+ "epoch": 0.32709199462348787,
+ "grad_norm": 0.5017902851104736,
+ "learning_rate": 0.0001941825147789225,
+ "loss": 1.5794,
+ "step": 327
+ },
+ {
+ "epoch": 0.32809227595261165,
+ "grad_norm": 0.643267810344696,
+ "learning_rate": 0.00019414723115925456,
+ "loss": 1.4903,
+ "step": 328
+ },
+ {
+ "epoch": 0.3290925572817355,
+ "grad_norm": 0.6522070169448853,
+ "learning_rate": 0.0001941118440888061,
+ "loss": 1.5907,
+ "step": 329
+ },
+ {
+ "epoch": 0.3300928386108593,
+ "grad_norm": 0.6496105790138245,
+ "learning_rate": 0.0001940763536064611,
+ "loss": 1.4225,
+ "step": 330
+ },
+ {
+ "epoch": 0.33109311993998314,
+ "grad_norm": 0.6011468768119812,
+ "learning_rate": 0.00019404075975121716,
+ "loss": 1.5022,
+ "step": 331
+ },
+ {
+ "epoch": 0.3320934012691069,
+ "grad_norm": 0.6327878832817078,
+ "learning_rate": 0.0001940050625621855,
+ "loss": 1.468,
+ "step": 332
+ },
+ {
+ "epoch": 0.33309368259823074,
+ "grad_norm": 0.6187490820884705,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 1.5183,
+ "step": 333
+ },
+ {
+ "epoch": 0.3340939639273546,
+ "grad_norm": 0.7625093460083008,
+ "learning_rate": 0.0001939333583397715,
+ "loss": 1.4813,
+ "step": 334
+ },
+ {
+ "epoch": 0.3350942452564784,
+ "grad_norm": 0.5286359190940857,
+ "learning_rate": 0.00019389735138517915,
+ "loss": 1.3674,
+ "step": 335
+ },
+ {
+ "epoch": 0.3360945265856022,
+ "grad_norm": 0.5798503160476685,
+ "learning_rate": 0.00019386124125437895,
+ "loss": 1.3016,
+ "step": 336
+ },
+ {
+ "epoch": 0.337094807914726,
+ "grad_norm": 0.48794126510620117,
+ "learning_rate": 0.00019382502798704935,
+ "loss": 1.3642,
+ "step": 337
+ },
+ {
+ "epoch": 0.33809508924384984,
+ "grad_norm": 0.7394312620162964,
+ "learning_rate": 0.00019378871162298227,
+ "loss": 1.327,
+ "step": 338
+ },
+ {
+ "epoch": 0.3390953705729737,
+ "grad_norm": 0.5598319172859192,
+ "learning_rate": 0.00019375229220208276,
+ "loss": 1.4247,
+ "step": 339
+ },
+ {
+ "epoch": 0.34009565190209745,
+ "grad_norm": 0.6099628806114197,
+ "learning_rate": 0.00019371576976436917,
+ "loss": 1.4906,
+ "step": 340
+ },
+ {
+ "epoch": 0.3410959332312213,
+ "grad_norm": 0.6749781370162964,
+ "learning_rate": 0.00019367914434997312,
+ "loss": 1.367,
+ "step": 341
+ },
+ {
+ "epoch": 0.3420962145603451,
+ "grad_norm": 0.7721238136291504,
+ "learning_rate": 0.00019364241599913924,
+ "loss": 1.4464,
+ "step": 342
+ },
+ {
+ "epoch": 0.3430964958894689,
+ "grad_norm": 0.5762369632720947,
+ "learning_rate": 0.0001936055847522254,
+ "loss": 1.409,
+ "step": 343
+ },
+ {
+ "epoch": 0.3440967772185927,
+ "grad_norm": 0.6960498690605164,
+ "learning_rate": 0.00019356865064970244,
+ "loss": 1.3907,
+ "step": 344
+ },
+ {
+ "epoch": 0.34509705854771655,
+ "grad_norm": 0.5805984735488892,
+ "learning_rate": 0.0001935316137321543,
+ "loss": 1.4539,
+ "step": 345
+ },
+ {
+ "epoch": 0.3460973398768404,
+ "grad_norm": 0.5686045289039612,
+ "learning_rate": 0.00019349447404027782,
+ "loss": 1.4493,
+ "step": 346
+ },
+ {
+ "epoch": 0.34709762120596416,
+ "grad_norm": 0.5448501706123352,
+ "learning_rate": 0.00019345723161488283,
+ "loss": 1.5633,
+ "step": 347
+ },
+ {
+ "epoch": 0.348097902535088,
+ "grad_norm": 0.6388784050941467,
+ "learning_rate": 0.000193419886496892,
+ "loss": 1.7179,
+ "step": 348
+ },
+ {
+ "epoch": 0.3490981838642118,
+ "grad_norm": 0.5240457653999329,
+ "learning_rate": 0.00019338243872734086,
+ "loss": 1.4411,
+ "step": 349
+ },
+ {
+ "epoch": 0.35009846519333565,
+ "grad_norm": 0.5460641384124756,
+ "learning_rate": 0.00019334488834737775,
+ "loss": 1.361,
+ "step": 350
+ },
+ {
+ "epoch": 0.3510987465224594,
+ "grad_norm": 0.5495695471763611,
+ "learning_rate": 0.00019330723539826375,
+ "loss": 1.5891,
+ "step": 351
+ },
+ {
+ "epoch": 0.35209902785158326,
+ "grad_norm": 0.5618153214454651,
+ "learning_rate": 0.00019326947992137262,
+ "loss": 1.3084,
+ "step": 352
+ },
+ {
+ "epoch": 0.3530993091807071,
+ "grad_norm": 0.5603707432746887,
+ "learning_rate": 0.00019323162195819082,
+ "loss": 1.5732,
+ "step": 353
+ },
+ {
+ "epoch": 0.35409959050983086,
+ "grad_norm": 0.5732563138008118,
+ "learning_rate": 0.0001931936615503174,
+ "loss": 1.5045,
+ "step": 354
+ },
+ {
+ "epoch": 0.3550998718389547,
+ "grad_norm": 0.5997583866119385,
+ "learning_rate": 0.000193155598739464,
+ "loss": 1.4175,
+ "step": 355
+ },
+ {
+ "epoch": 0.3561001531680785,
+ "grad_norm": 0.5769765377044678,
+ "learning_rate": 0.0001931174335674547,
+ "loss": 1.4834,
+ "step": 356
+ },
+ {
+ "epoch": 0.35710043449720236,
+ "grad_norm": 0.5902683138847351,
+ "learning_rate": 0.0001930791660762262,
+ "loss": 1.4664,
+ "step": 357
+ },
+ {
+ "epoch": 0.35810071582632613,
+ "grad_norm": 0.6354758143424988,
+ "learning_rate": 0.00019304079630782752,
+ "loss": 1.3891,
+ "step": 358
+ },
+ {
+ "epoch": 0.35910099715544996,
+ "grad_norm": 0.6018317341804504,
+ "learning_rate": 0.0001930023243044201,
+ "loss": 1.4514,
+ "step": 359
+ },
+ {
+ "epoch": 0.3601012784845738,
+ "grad_norm": 0.5409123301506042,
+ "learning_rate": 0.00019296375010827773,
+ "loss": 1.4708,
+ "step": 360
+ },
+ {
+ "epoch": 0.3611015598136976,
+ "grad_norm": 0.5457523465156555,
+ "learning_rate": 0.00019292507376178643,
+ "loss": 1.4988,
+ "step": 361
+ },
+ {
+ "epoch": 0.3621018411428214,
+ "grad_norm": 0.626768946647644,
+ "learning_rate": 0.00019288629530744454,
+ "loss": 1.5722,
+ "step": 362
+ },
+ {
+ "epoch": 0.36310212247194523,
+ "grad_norm": 0.566554069519043,
+ "learning_rate": 0.0001928474147878626,
+ "loss": 1.2135,
+ "step": 363
+ },
+ {
+ "epoch": 0.36410240380106906,
+ "grad_norm": 0.7327786684036255,
+ "learning_rate": 0.0001928084322457632,
+ "loss": 1.5245,
+ "step": 364
+ },
+ {
+ "epoch": 0.3651026851301929,
+ "grad_norm": 0.5205698609352112,
+ "learning_rate": 0.00019276934772398114,
+ "loss": 1.2068,
+ "step": 365
+ },
+ {
+ "epoch": 0.36610296645931667,
+ "grad_norm": 1.0956753492355347,
+ "learning_rate": 0.00019273016126546323,
+ "loss": 1.5044,
+ "step": 366
+ },
+ {
+ "epoch": 0.3671032477884405,
+ "grad_norm": 0.6484043598175049,
+ "learning_rate": 0.00019269087291326833,
+ "loss": 1.6369,
+ "step": 367
+ },
+ {
+ "epoch": 0.36810352911756433,
+ "grad_norm": 0.6363429427146912,
+ "learning_rate": 0.00019265148271056722,
+ "loss": 1.4338,
+ "step": 368
+ },
+ {
+ "epoch": 0.3691038104466881,
+ "grad_norm": 0.6295244693756104,
+ "learning_rate": 0.0001926119907006426,
+ "loss": 1.4701,
+ "step": 369
+ },
+ {
+ "epoch": 0.37010409177581194,
+ "grad_norm": 0.6013259887695312,
+ "learning_rate": 0.00019257239692688907,
+ "loss": 1.7629,
+ "step": 370
+ },
+ {
+ "epoch": 0.37110437310493577,
+ "grad_norm": 0.6949493885040283,
+ "learning_rate": 0.00019253270143281296,
+ "loss": 1.6713,
+ "step": 371
+ },
+ {
+ "epoch": 0.3721046544340596,
+ "grad_norm": 0.6933801174163818,
+ "learning_rate": 0.00019249290426203252,
+ "loss": 1.6131,
+ "step": 372
+ },
+ {
+ "epoch": 0.3731049357631834,
+ "grad_norm": 0.5847527384757996,
+ "learning_rate": 0.0001924530054582776,
+ "loss": 1.3968,
+ "step": 373
+ },
+ {
+ "epoch": 0.3741052170923072,
+ "grad_norm": 0.6053057312965393,
+ "learning_rate": 0.0001924130050653898,
+ "loss": 1.3311,
+ "step": 374
+ },
+ {
+ "epoch": 0.37510549842143104,
+ "grad_norm": 0.5513793230056763,
+ "learning_rate": 0.00019237290312732226,
+ "loss": 1.5063,
+ "step": 375
+ },
+ {
+ "epoch": 0.37610577975055487,
+ "grad_norm": 0.5859197378158569,
+ "learning_rate": 0.00019233269968813984,
+ "loss": 1.3556,
+ "step": 376
+ },
+ {
+ "epoch": 0.37710606107967864,
+ "grad_norm": 0.5623495578765869,
+ "learning_rate": 0.00019229239479201876,
+ "loss": 1.3859,
+ "step": 377
+ },
+ {
+ "epoch": 0.3781063424088025,
+ "grad_norm": 0.602118968963623,
+ "learning_rate": 0.0001922519884832469,
+ "loss": 1.334,
+ "step": 378
+ },
+ {
+ "epoch": 0.3791066237379263,
+ "grad_norm": 0.5212380886077881,
+ "learning_rate": 0.0001922114808062234,
+ "loss": 1.401,
+ "step": 379
+ },
+ {
+ "epoch": 0.3801069050670501,
+ "grad_norm": 0.4969455599784851,
+ "learning_rate": 0.00019217087180545893,
+ "loss": 1.2292,
+ "step": 380
+ },
+ {
+ "epoch": 0.3811071863961739,
+ "grad_norm": 0.578629732131958,
+ "learning_rate": 0.0001921301615255754,
+ "loss": 1.5015,
+ "step": 381
+ },
+ {
+ "epoch": 0.38210746772529774,
+ "grad_norm": 0.593053936958313,
+ "learning_rate": 0.0001920893500113061,
+ "loss": 1.302,
+ "step": 382
+ },
+ {
+ "epoch": 0.3831077490544216,
+ "grad_norm": 0.5832563638687134,
+ "learning_rate": 0.00019204843730749547,
+ "loss": 1.3695,
+ "step": 383
+ },
+ {
+ "epoch": 0.38410803038354535,
+ "grad_norm": 0.5608510375022888,
+ "learning_rate": 0.00019200742345909915,
+ "loss": 1.3792,
+ "step": 384
+ },
+ {
+ "epoch": 0.3851083117126692,
+ "grad_norm": 0.5337334275245667,
+ "learning_rate": 0.00019196630851118398,
+ "loss": 1.4163,
+ "step": 385
+ },
+ {
+ "epoch": 0.386108593041793,
+ "grad_norm": 0.5460125803947449,
+ "learning_rate": 0.0001919250925089278,
+ "loss": 1.2439,
+ "step": 386
+ },
+ {
+ "epoch": 0.38710887437091684,
+ "grad_norm": 0.6217851638793945,
+ "learning_rate": 0.00019188377549761963,
+ "loss": 1.6428,
+ "step": 387
+ },
+ {
+ "epoch": 0.3881091557000406,
+ "grad_norm": 0.7154502868652344,
+ "learning_rate": 0.00019184235752265928,
+ "loss": 1.3468,
+ "step": 388
+ },
+ {
+ "epoch": 0.38910943702916445,
+ "grad_norm": 0.5044635534286499,
+ "learning_rate": 0.00019180083862955772,
+ "loss": 1.1877,
+ "step": 389
+ },
+ {
+ "epoch": 0.3901097183582883,
+ "grad_norm": 0.5755971074104309,
+ "learning_rate": 0.00019175921886393666,
+ "loss": 1.3475,
+ "step": 390
+ },
+ {
+ "epoch": 0.39110999968741206,
+ "grad_norm": 0.6121137738227844,
+ "learning_rate": 0.00019171749827152869,
+ "loss": 1.4342,
+ "step": 391
+ },
+ {
+ "epoch": 0.3921102810165359,
+ "grad_norm": 0.5615536570549011,
+ "learning_rate": 0.0001916756768981772,
+ "loss": 1.5471,
+ "step": 392
+ },
+ {
+ "epoch": 0.3931105623456597,
+ "grad_norm": 0.6527026295661926,
+ "learning_rate": 0.00019163375478983632,
+ "loss": 1.6363,
+ "step": 393
+ },
+ {
+ "epoch": 0.39411084367478355,
+ "grad_norm": 0.6465044617652893,
+ "learning_rate": 0.00019159173199257085,
+ "loss": 1.3823,
+ "step": 394
+ },
+ {
+ "epoch": 0.3951111250039073,
+ "grad_norm": 0.5620000958442688,
+ "learning_rate": 0.00019154960855255628,
+ "loss": 1.5418,
+ "step": 395
+ },
+ {
+ "epoch": 0.39611140633303116,
+ "grad_norm": 0.7090588808059692,
+ "learning_rate": 0.0001915073845160786,
+ "loss": 1.4593,
+ "step": 396
+ },
+ {
+ "epoch": 0.397111687662155,
+ "grad_norm": 0.6644489169120789,
+ "learning_rate": 0.00019146505992953446,
+ "loss": 1.4236,
+ "step": 397
+ },
+ {
+ "epoch": 0.3981119689912788,
+ "grad_norm": 0.6038135886192322,
+ "learning_rate": 0.00019142263483943085,
+ "loss": 1.1805,
+ "step": 398
+ },
+ {
+ "epoch": 0.3991122503204026,
+ "grad_norm": 0.6746726036071777,
+ "learning_rate": 0.00019138010929238534,
+ "loss": 1.5264,
+ "step": 399
+ },
+ {
+ "epoch": 0.4001125316495264,
+ "grad_norm": 0.5871374607086182,
+ "learning_rate": 0.00019133748333512575,
+ "loss": 1.3709,
+ "step": 400
+ },
+ {
+ "epoch": 0.40111281297865026,
+ "grad_norm": 0.5743412375450134,
+ "learning_rate": 0.00019129475701449035,
+ "loss": 1.4677,
+ "step": 401
+ },
+ {
+ "epoch": 0.4021130943077741,
+ "grad_norm": 0.6184396743774414,
+ "learning_rate": 0.0001912519303774276,
+ "loss": 1.4228,
+ "step": 402
+ },
+ {
+ "epoch": 0.40311337563689786,
+ "grad_norm": 0.5872434973716736,
+ "learning_rate": 0.0001912090034709963,
+ "loss": 1.3495,
+ "step": 403
+ },
+ {
+ "epoch": 0.4041136569660217,
+ "grad_norm": 0.6500155925750732,
+ "learning_rate": 0.00019116597634236525,
+ "loss": 1.4315,
+ "step": 404
+ },
+ {
+ "epoch": 0.4051139382951455,
+ "grad_norm": 0.5240740180015564,
+ "learning_rate": 0.0001911228490388136,
+ "loss": 1.4954,
+ "step": 405
+ },
+ {
+ "epoch": 0.4061142196242693,
+ "grad_norm": 0.5531806945800781,
+ "learning_rate": 0.00019107962160773035,
+ "loss": 1.3949,
+ "step": 406
+ },
+ {
+ "epoch": 0.40711450095339313,
+ "grad_norm": 0.5266262888908386,
+ "learning_rate": 0.0001910362940966147,
+ "loss": 1.2859,
+ "step": 407
+ },
+ {
+ "epoch": 0.40811478228251696,
+ "grad_norm": 0.5734869241714478,
+ "learning_rate": 0.00019099286655307568,
+ "loss": 1.2451,
+ "step": 408
+ },
+ {
+ "epoch": 0.4091150636116408,
+ "grad_norm": 0.5922874212265015,
+ "learning_rate": 0.0001909493390248324,
+ "loss": 1.5429,
+ "step": 409
+ },
+ {
+ "epoch": 0.41011534494076457,
+ "grad_norm": 0.542540431022644,
+ "learning_rate": 0.00019090571155971366,
+ "loss": 1.4138,
+ "step": 410
+ },
+ {
+ "epoch": 0.4111156262698884,
+ "grad_norm": 0.57356196641922,
+ "learning_rate": 0.00019086198420565823,
+ "loss": 1.2592,
+ "step": 411
+ },
+ {
+ "epoch": 0.41211590759901223,
+ "grad_norm": 0.6042733192443848,
+ "learning_rate": 0.00019081815701071445,
+ "loss": 1.5524,
+ "step": 412
+ },
+ {
+ "epoch": 0.41311618892813606,
+ "grad_norm": 0.46550241112709045,
+ "learning_rate": 0.0001907742300230406,
+ "loss": 1.308,
+ "step": 413
+ },
+ {
+ "epoch": 0.41411647025725984,
+ "grad_norm": 0.6283137798309326,
+ "learning_rate": 0.00019073020329090444,
+ "loss": 1.4753,
+ "step": 414
+ },
+ {
+ "epoch": 0.41511675158638367,
+ "grad_norm": 0.5254876613616943,
+ "learning_rate": 0.0001906860768626834,
+ "loss": 1.2157,
+ "step": 415
+ },
+ {
+ "epoch": 0.4161170329155075,
+ "grad_norm": 0.59089195728302,
+ "learning_rate": 0.00019064185078686443,
+ "loss": 1.2684,
+ "step": 416
+ },
+ {
+ "epoch": 0.4171173142446313,
+ "grad_norm": 0.7129126787185669,
+ "learning_rate": 0.000190597525112044,
+ "loss": 1.3974,
+ "step": 417
+ },
+ {
+ "epoch": 0.4181175955737551,
+ "grad_norm": 0.607305109500885,
+ "learning_rate": 0.000190553099886928,
+ "loss": 1.4312,
+ "step": 418
+ },
+ {
+ "epoch": 0.41911787690287894,
+ "grad_norm": 0.49921515583992004,
+ "learning_rate": 0.00019050857516033173,
+ "loss": 1.3469,
+ "step": 419
+ },
+ {
+ "epoch": 0.42011815823200277,
+ "grad_norm": 0.6167325377464294,
+ "learning_rate": 0.00019046395098117983,
+ "loss": 1.4723,
+ "step": 420
+ },
+ {
+ "epoch": 0.42111843956112655,
+ "grad_norm": 0.6144593358039856,
+ "learning_rate": 0.00019041922739850616,
+ "loss": 1.5502,
+ "step": 421
+ },
+ {
+ "epoch": 0.4221187208902504,
+ "grad_norm": 0.61333167552948,
+ "learning_rate": 0.00019037440446145385,
+ "loss": 1.3283,
+ "step": 422
+ },
+ {
+ "epoch": 0.4231190022193742,
+ "grad_norm": 0.5881702303886414,
+ "learning_rate": 0.00019032948221927524,
+ "loss": 1.4206,
+ "step": 423
+ },
+ {
+ "epoch": 0.42411928354849804,
+ "grad_norm": 0.5334322452545166,
+ "learning_rate": 0.00019028446072133175,
+ "loss": 1.4603,
+ "step": 424
+ },
+ {
+ "epoch": 0.4251195648776218,
+ "grad_norm": 0.5730605721473694,
+ "learning_rate": 0.00019023934001709383,
+ "loss": 1.4375,
+ "step": 425
+ },
+ {
+ "epoch": 0.42611984620674564,
+ "grad_norm": 0.6227820515632629,
+ "learning_rate": 0.00019019412015614098,
+ "loss": 1.4888,
+ "step": 426
+ },
+ {
+ "epoch": 0.4271201275358695,
+ "grad_norm": 0.5811313390731812,
+ "learning_rate": 0.00019014880118816164,
+ "loss": 1.3492,
+ "step": 427
+ },
+ {
+ "epoch": 0.4281204088649933,
+ "grad_norm": 0.5685800313949585,
+ "learning_rate": 0.0001901033831629532,
+ "loss": 1.5052,
+ "step": 428
+ },
+ {
+ "epoch": 0.4291206901941171,
+ "grad_norm": 0.5961394309997559,
+ "learning_rate": 0.00019005786613042185,
+ "loss": 1.3324,
+ "step": 429
+ },
+ {
+ "epoch": 0.4301209715232409,
+ "grad_norm": 0.5845314860343933,
+ "learning_rate": 0.00019001225014058255,
+ "loss": 1.5733,
+ "step": 430
+ },
+ {
+ "epoch": 0.43112125285236474,
+ "grad_norm": 0.5400176048278809,
+ "learning_rate": 0.00018996653524355902,
+ "loss": 1.3973,
+ "step": 431
+ },
+ {
+ "epoch": 0.4321215341814885,
+ "grad_norm": 0.5462201833724976,
+ "learning_rate": 0.00018992072148958368,
+ "loss": 1.2167,
+ "step": 432
+ },
+ {
+ "epoch": 0.43312181551061235,
+ "grad_norm": 0.6200360059738159,
+ "learning_rate": 0.00018987480892899758,
+ "loss": 1.5596,
+ "step": 433
+ },
+ {
+ "epoch": 0.4341220968397362,
+ "grad_norm": 0.5230718851089478,
+ "learning_rate": 0.00018982879761225027,
+ "loss": 1.3661,
+ "step": 434
+ },
+ {
+ "epoch": 0.43512237816886,
+ "grad_norm": 0.5868643522262573,
+ "learning_rate": 0.00018978268758989991,
+ "loss": 1.4792,
+ "step": 435
+ },
+ {
+ "epoch": 0.4361226594979838,
+ "grad_norm": 0.580892026424408,
+ "learning_rate": 0.00018973647891261307,
+ "loss": 1.3275,
+ "step": 436
+ },
+ {
+ "epoch": 0.4371229408271076,
+ "grad_norm": 0.5903263688087463,
+ "learning_rate": 0.00018969017163116472,
+ "loss": 1.4721,
+ "step": 437
+ },
+ {
+ "epoch": 0.43812322215623145,
+ "grad_norm": 0.5108968019485474,
+ "learning_rate": 0.0001896437657964382,
+ "loss": 1.3785,
+ "step": 438
+ },
+ {
+ "epoch": 0.4391235034853553,
+ "grad_norm": 0.6707500219345093,
+ "learning_rate": 0.00018959726145942508,
+ "loss": 1.5033,
+ "step": 439
+ },
+ {
+ "epoch": 0.44012378481447906,
+ "grad_norm": 0.5793184638023376,
+ "learning_rate": 0.00018955065867122528,
+ "loss": 1.3629,
+ "step": 440
+ },
+ {
+ "epoch": 0.4411240661436029,
+ "grad_norm": 0.5549041628837585,
+ "learning_rate": 0.00018950395748304678,
+ "loss": 1.5557,
+ "step": 441
+ },
+ {
+ "epoch": 0.4421243474727267,
+ "grad_norm": 0.5406919121742249,
+ "learning_rate": 0.0001894571579462058,
+ "loss": 1.4441,
+ "step": 442
+ },
+ {
+ "epoch": 0.4431246288018505,
+ "grad_norm": 0.5131089091300964,
+ "learning_rate": 0.00018941026011212654,
+ "loss": 1.3051,
+ "step": 443
+ },
+ {
+ "epoch": 0.4441249101309743,
+ "grad_norm": 0.601586639881134,
+ "learning_rate": 0.00018936326403234125,
+ "loss": 1.5297,
+ "step": 444
+ },
+ {
+ "epoch": 0.44512519146009816,
+ "grad_norm": 0.5036457180976868,
+ "learning_rate": 0.00018931616975849006,
+ "loss": 1.357,
+ "step": 445
+ },
+ {
+ "epoch": 0.446125472789222,
+ "grad_norm": 0.5471266508102417,
+ "learning_rate": 0.00018926897734232115,
+ "loss": 1.2176,
+ "step": 446
+ },
+ {
+ "epoch": 0.44712575411834576,
+ "grad_norm": 0.6057867407798767,
+ "learning_rate": 0.0001892216868356904,
+ "loss": 1.4763,
+ "step": 447
+ },
+ {
+ "epoch": 0.4481260354474696,
+ "grad_norm": 0.5384593605995178,
+ "learning_rate": 0.0001891742982905615,
+ "loss": 1.513,
+ "step": 448
+ },
+ {
+ "epoch": 0.4491263167765934,
+ "grad_norm": 0.6144880056381226,
+ "learning_rate": 0.00018912681175900598,
+ "loss": 1.5782,
+ "step": 449
+ },
+ {
+ "epoch": 0.45012659810571726,
+ "grad_norm": 0.4838174879550934,
+ "learning_rate": 0.00018907922729320285,
+ "loss": 1.4085,
+ "step": 450
+ },
+ {
+ "epoch": 0.45112687943484103,
+ "grad_norm": 0.6852928400039673,
+ "learning_rate": 0.00018903154494543889,
+ "loss": 1.5989,
+ "step": 451
+ },
+ {
+ "epoch": 0.45212716076396486,
+ "grad_norm": 0.47527411580085754,
+ "learning_rate": 0.00018898376476810834,
+ "loss": 1.3409,
+ "step": 452
+ },
+ {
+ "epoch": 0.4531274420930887,
+ "grad_norm": 0.5665884613990784,
+ "learning_rate": 0.00018893588681371303,
+ "loss": 1.5395,
+ "step": 453
+ },
+ {
+ "epoch": 0.45412772342221247,
+ "grad_norm": 0.5792158246040344,
+ "learning_rate": 0.00018888791113486213,
+ "loss": 1.516,
+ "step": 454
+ },
+ {
+ "epoch": 0.4551280047513363,
+ "grad_norm": 0.5223523378372192,
+ "learning_rate": 0.00018883983778427227,
+ "loss": 1.3678,
+ "step": 455
+ },
+ {
+ "epoch": 0.45612828608046013,
+ "grad_norm": 0.5927590131759644,
+ "learning_rate": 0.0001887916668147673,
+ "loss": 1.3617,
+ "step": 456
+ },
+ {
+ "epoch": 0.45712856740958396,
+ "grad_norm": 0.7266496419906616,
+ "learning_rate": 0.00018874339827927846,
+ "loss": 1.3734,
+ "step": 457
+ },
+ {
+ "epoch": 0.45812884873870774,
+ "grad_norm": 0.6495805978775024,
+ "learning_rate": 0.00018869503223084414,
+ "loss": 1.5282,
+ "step": 458
+ },
+ {
+ "epoch": 0.45912913006783157,
+ "grad_norm": 0.6099816560745239,
+ "learning_rate": 0.00018864656872260985,
+ "loss": 1.4691,
+ "step": 459
+ },
+ {
+ "epoch": 0.4601294113969554,
+ "grad_norm": 0.5208227038383484,
+ "learning_rate": 0.00018859800780782828,
+ "loss": 1.3949,
+ "step": 460
+ },
+ {
+ "epoch": 0.46112969272607923,
+ "grad_norm": 0.5526600480079651,
+ "learning_rate": 0.000188549349539859,
+ "loss": 1.3557,
+ "step": 461
+ },
+ {
+ "epoch": 0.462129974055203,
+ "grad_norm": 0.5537740588188171,
+ "learning_rate": 0.00018850059397216876,
+ "loss": 1.4703,
+ "step": 462
+ },
+ {
+ "epoch": 0.46313025538432684,
+ "grad_norm": 0.5553976893424988,
+ "learning_rate": 0.00018845174115833099,
+ "loss": 1.4356,
+ "step": 463
+ },
+ {
+ "epoch": 0.46413053671345067,
+ "grad_norm": 0.6027779579162598,
+ "learning_rate": 0.0001884027911520262,
+ "loss": 1.4763,
+ "step": 464
+ },
+ {
+ "epoch": 0.4651308180425745,
+ "grad_norm": 0.5559154748916626,
+ "learning_rate": 0.00018835374400704154,
+ "loss": 1.4148,
+ "step": 465
+ },
+ {
+ "epoch": 0.4661310993716983,
+ "grad_norm": 0.6124109029769897,
+ "learning_rate": 0.00018830459977727096,
+ "loss": 1.4468,
+ "step": 466
+ },
+ {
+ "epoch": 0.4671313807008221,
+ "grad_norm": 0.4762580692768097,
+ "learning_rate": 0.0001882553585167151,
+ "loss": 1.3714,
+ "step": 467
+ },
+ {
+ "epoch": 0.46813166202994594,
+ "grad_norm": 0.5793487429618835,
+ "learning_rate": 0.00018820602027948114,
+ "loss": 1.4828,
+ "step": 468
+ },
+ {
+ "epoch": 0.4691319433590697,
+ "grad_norm": 0.55177241563797,
+ "learning_rate": 0.00018815658511978298,
+ "loss": 1.4157,
+ "step": 469
+ },
+ {
+ "epoch": 0.47013222468819355,
+ "grad_norm": 0.5065292716026306,
+ "learning_rate": 0.00018810705309194083,
+ "loss": 1.4519,
+ "step": 470
+ },
+ {
+ "epoch": 0.4711325060173174,
+ "grad_norm": 0.5401413440704346,
+ "learning_rate": 0.00018805742425038145,
+ "loss": 1.4344,
+ "step": 471
+ },
+ {
+ "epoch": 0.4721327873464412,
+ "grad_norm": 0.7173880338668823,
+ "learning_rate": 0.00018800769864963802,
+ "loss": 1.7325,
+ "step": 472
+ },
+ {
+ "epoch": 0.473133068675565,
+ "grad_norm": 0.507682204246521,
+ "learning_rate": 0.00018795787634434994,
+ "loss": 1.37,
+ "step": 473
+ },
+ {
+ "epoch": 0.4741333500046888,
+ "grad_norm": 0.551888644695282,
+ "learning_rate": 0.0001879079573892629,
+ "loss": 1.3695,
+ "step": 474
+ },
+ {
+ "epoch": 0.47513363133381264,
+ "grad_norm": 0.5109260082244873,
+ "learning_rate": 0.00018785794183922883,
+ "loss": 1.4001,
+ "step": 475
+ },
+ {
+ "epoch": 0.4761339126629365,
+ "grad_norm": 0.4565551280975342,
+ "learning_rate": 0.00018780782974920572,
+ "loss": 1.1752,
+ "step": 476
+ },
+ {
+ "epoch": 0.47713419399206025,
+ "grad_norm": 0.5651509761810303,
+ "learning_rate": 0.00018775762117425777,
+ "loss": 1.4291,
+ "step": 477
+ },
+ {
+ "epoch": 0.4781344753211841,
+ "grad_norm": 0.5827792286872864,
+ "learning_rate": 0.0001877073161695551,
+ "loss": 1.3438,
+ "step": 478
+ },
+ {
+ "epoch": 0.4791347566503079,
+ "grad_norm": 0.5719752907752991,
+ "learning_rate": 0.00018765691479037376,
+ "loss": 1.4683,
+ "step": 479
+ },
+ {
+ "epoch": 0.4801350379794317,
+ "grad_norm": 0.5153111815452576,
+ "learning_rate": 0.00018760641709209583,
+ "loss": 1.4392,
+ "step": 480
+ },
+ {
+ "epoch": 0.4811353193085555,
+ "grad_norm": 0.5455904603004456,
+ "learning_rate": 0.0001875558231302091,
+ "loss": 1.1603,
+ "step": 481
+ },
+ {
+ "epoch": 0.48213560063767935,
+ "grad_norm": 0.5857074856758118,
+ "learning_rate": 0.00018750513296030718,
+ "loss": 1.3099,
+ "step": 482
+ },
+ {
+ "epoch": 0.4831358819668032,
+ "grad_norm": 0.6051676273345947,
+ "learning_rate": 0.00018745434663808942,
+ "loss": 1.3587,
+ "step": 483
+ },
+ {
+ "epoch": 0.48413616329592696,
+ "grad_norm": 0.588749885559082,
+ "learning_rate": 0.0001874034642193608,
+ "loss": 1.5277,
+ "step": 484
+ },
+ {
+ "epoch": 0.4851364446250508,
+ "grad_norm": 0.5295410752296448,
+ "learning_rate": 0.0001873524857600319,
+ "loss": 1.2084,
+ "step": 485
+ },
+ {
+ "epoch": 0.4861367259541746,
+ "grad_norm": 0.5313368439674377,
+ "learning_rate": 0.00018730141131611882,
+ "loss": 1.4002,
+ "step": 486
+ },
+ {
+ "epoch": 0.48713700728329845,
+ "grad_norm": 0.5166353583335876,
+ "learning_rate": 0.00018725024094374315,
+ "loss": 1.208,
+ "step": 487
+ },
+ {
+ "epoch": 0.4881372886124222,
+ "grad_norm": 0.5478363037109375,
+ "learning_rate": 0.00018719897469913184,
+ "loss": 1.3236,
+ "step": 488
+ },
+ {
+ "epoch": 0.48913756994154606,
+ "grad_norm": 0.5531913042068481,
+ "learning_rate": 0.00018714761263861728,
+ "loss": 1.4938,
+ "step": 489
+ },
+ {
+ "epoch": 0.4901378512706699,
+ "grad_norm": 0.5334530472755432,
+ "learning_rate": 0.000187096154818637,
+ "loss": 1.4172,
+ "step": 490
+ },
+ {
+ "epoch": 0.4911381325997937,
+ "grad_norm": 0.5667001605033875,
+ "learning_rate": 0.00018704460129573391,
+ "loss": 1.3517,
+ "step": 491
+ },
+ {
+ "epoch": 0.4921384139289175,
+ "grad_norm": 0.5568780303001404,
+ "learning_rate": 0.00018699295212655596,
+ "loss": 1.4287,
+ "step": 492
+ },
+ {
+ "epoch": 0.4931386952580413,
+ "grad_norm": 0.6663610935211182,
+ "learning_rate": 0.00018694120736785632,
+ "loss": 1.5416,
+ "step": 493
+ },
+ {
+ "epoch": 0.49413897658716516,
+ "grad_norm": 0.5753045082092285,
+ "learning_rate": 0.00018688936707649304,
+ "loss": 1.5552,
+ "step": 494
+ },
+ {
+ "epoch": 0.49513925791628893,
+ "grad_norm": 0.5707410573959351,
+ "learning_rate": 0.00018683743130942928,
+ "loss": 1.5332,
+ "step": 495
+ },
+ {
+ "epoch": 0.49613953924541276,
+ "grad_norm": 0.5847951173782349,
+ "learning_rate": 0.00018678540012373302,
+ "loss": 1.3488,
+ "step": 496
+ },
+ {
+ "epoch": 0.4971398205745366,
+ "grad_norm": 0.60503751039505,
+ "learning_rate": 0.00018673327357657715,
+ "loss": 1.3924,
+ "step": 497
+ },
+ {
+ "epoch": 0.4981401019036604,
+ "grad_norm": 0.635142982006073,
+ "learning_rate": 0.0001866810517252393,
+ "loss": 1.4392,
+ "step": 498
+ },
+ {
+ "epoch": 0.4991403832327842,
+ "grad_norm": 0.5536782741546631,
+ "learning_rate": 0.00018662873462710184,
+ "loss": 1.286,
+ "step": 499
+ },
+ {
+ "epoch": 0.5001406645619081,
+ "grad_norm": 0.5676659345626831,
+ "learning_rate": 0.0001865763223396518,
+ "loss": 1.3006,
+ "step": 500
+ },
+ {
+ "epoch": 0.5011409458910319,
+ "grad_norm": 0.5546663403511047,
+ "learning_rate": 0.00018652381492048083,
+ "loss": 1.418,
+ "step": 501
+ },
+ {
+ "epoch": 0.5021412272201556,
+ "grad_norm": 0.5137162804603577,
+ "learning_rate": 0.00018647121242728506,
+ "loss": 1.3173,
+ "step": 502
+ },
+ {
+ "epoch": 0.5031415085492795,
+ "grad_norm": 0.5474348068237305,
+ "learning_rate": 0.00018641851491786512,
+ "loss": 1.6652,
+ "step": 503
+ },
+ {
+ "epoch": 0.5041417898784033,
+ "grad_norm": 0.5563383102416992,
+ "learning_rate": 0.00018636572245012606,
+ "loss": 1.4519,
+ "step": 504
+ },
+ {
+ "epoch": 0.5051420712075271,
+ "grad_norm": 0.5621083974838257,
+ "learning_rate": 0.00018631283508207725,
+ "loss": 1.5418,
+ "step": 505
+ },
+ {
+ "epoch": 0.506142352536651,
+ "grad_norm": 0.49915972352027893,
+ "learning_rate": 0.00018625985287183233,
+ "loss": 1.2969,
+ "step": 506
+ },
+ {
+ "epoch": 0.5071426338657747,
+ "grad_norm": 0.601996660232544,
+ "learning_rate": 0.00018620677587760916,
+ "loss": 1.4483,
+ "step": 507
+ },
+ {
+ "epoch": 0.5081429151948985,
+ "grad_norm": 0.5594652891159058,
+ "learning_rate": 0.00018615360415772978,
+ "loss": 1.4094,
+ "step": 508
+ },
+ {
+ "epoch": 0.5091431965240224,
+ "grad_norm": 0.557381808757782,
+ "learning_rate": 0.00018610033777062025,
+ "loss": 1.216,
+ "step": 509
+ },
+ {
+ "epoch": 0.5101434778531462,
+ "grad_norm": 0.5841740369796753,
+ "learning_rate": 0.0001860469767748108,
+ "loss": 1.4924,
+ "step": 510
+ },
+ {
+ "epoch": 0.5111437591822701,
+ "grad_norm": 0.4968324899673462,
+ "learning_rate": 0.00018599352122893539,
+ "loss": 1.2474,
+ "step": 511
+ },
+ {
+ "epoch": 0.5121440405113938,
+ "grad_norm": 0.5390318632125854,
+ "learning_rate": 0.00018593997119173205,
+ "loss": 1.4484,
+ "step": 512
+ },
+ {
+ "epoch": 0.5131443218405176,
+ "grad_norm": 0.6626128554344177,
+ "learning_rate": 0.00018588632672204264,
+ "loss": 1.5664,
+ "step": 513
+ },
+ {
+ "epoch": 0.5141446031696415,
+ "grad_norm": 0.6183133721351624,
+ "learning_rate": 0.0001858325878788126,
+ "loss": 1.5603,
+ "step": 514
+ },
+ {
+ "epoch": 0.5151448844987653,
+ "grad_norm": 0.5574773550033569,
+ "learning_rate": 0.00018577875472109134,
+ "loss": 1.3668,
+ "step": 515
+ },
+ {
+ "epoch": 0.516145165827889,
+ "grad_norm": 0.5127518773078918,
+ "learning_rate": 0.0001857248273080317,
+ "loss": 1.264,
+ "step": 516
+ },
+ {
+ "epoch": 0.5171454471570129,
+ "grad_norm": 0.6540619134902954,
+ "learning_rate": 0.00018567080569889015,
+ "loss": 1.3091,
+ "step": 517
+ },
+ {
+ "epoch": 0.5181457284861367,
+ "grad_norm": 0.5286336541175842,
+ "learning_rate": 0.00018561668995302667,
+ "loss": 1.3581,
+ "step": 518
+ },
+ {
+ "epoch": 0.5191460098152605,
+ "grad_norm": 0.6609972715377808,
+ "learning_rate": 0.00018556248012990468,
+ "loss": 1.3123,
+ "step": 519
+ },
+ {
+ "epoch": 0.5201462911443844,
+ "grad_norm": 0.48230236768722534,
+ "learning_rate": 0.000185508176289091,
+ "loss": 1.2372,
+ "step": 520
+ },
+ {
+ "epoch": 0.5211465724735082,
+ "grad_norm": 0.5173765420913696,
+ "learning_rate": 0.00018545377849025566,
+ "loss": 1.327,
+ "step": 521
+ },
+ {
+ "epoch": 0.522146853802632,
+ "grad_norm": 0.5822583436965942,
+ "learning_rate": 0.0001853992867931721,
+ "loss": 1.3851,
+ "step": 522
+ },
+ {
+ "epoch": 0.5231471351317558,
+ "grad_norm": 0.6025621891021729,
+ "learning_rate": 0.00018534470125771674,
+ "loss": 1.5627,
+ "step": 523
+ },
+ {
+ "epoch": 0.5241474164608796,
+ "grad_norm": 0.5516778230667114,
+ "learning_rate": 0.0001852900219438693,
+ "loss": 1.4036,
+ "step": 524
+ },
+ {
+ "epoch": 0.5251476977900035,
+ "grad_norm": 0.5738380551338196,
+ "learning_rate": 0.0001852352489117124,
+ "loss": 1.5042,
+ "step": 525
+ },
+ {
+ "epoch": 0.5261479791191273,
+ "grad_norm": 0.6360776424407959,
+ "learning_rate": 0.00018518038222143174,
+ "loss": 1.4101,
+ "step": 526
+ },
+ {
+ "epoch": 0.527148260448251,
+ "grad_norm": 0.5776675939559937,
+ "learning_rate": 0.00018512542193331583,
+ "loss": 1.6015,
+ "step": 527
+ },
+ {
+ "epoch": 0.5281485417773749,
+ "grad_norm": 0.5662726759910583,
+ "learning_rate": 0.00018507036810775615,
+ "loss": 1.3186,
+ "step": 528
+ },
+ {
+ "epoch": 0.5291488231064987,
+ "grad_norm": 0.6518335938453674,
+ "learning_rate": 0.00018501522080524688,
+ "loss": 1.4882,
+ "step": 529
+ },
+ {
+ "epoch": 0.5301491044356225,
+ "grad_norm": 0.5475590825080872,
+ "learning_rate": 0.0001849599800863849,
+ "loss": 1.487,
+ "step": 530
+ },
+ {
+ "epoch": 0.5311493857647464,
+ "grad_norm": 0.6275209188461304,
+ "learning_rate": 0.0001849046460118698,
+ "loss": 1.3563,
+ "step": 531
+ },
+ {
+ "epoch": 0.5321496670938701,
+ "grad_norm": 0.5629132390022278,
+ "learning_rate": 0.0001848492186425037,
+ "loss": 1.516,
+ "step": 532
+ },
+ {
+ "epoch": 0.533149948422994,
+ "grad_norm": 0.5251057744026184,
+ "learning_rate": 0.0001847936980391913,
+ "loss": 1.5254,
+ "step": 533
+ },
+ {
+ "epoch": 0.5341502297521178,
+ "grad_norm": 0.5635396838188171,
+ "learning_rate": 0.00018473808426293964,
+ "loss": 1.3408,
+ "step": 534
+ },
+ {
+ "epoch": 0.5351505110812416,
+ "grad_norm": 0.527082622051239,
+ "learning_rate": 0.00018468237737485823,
+ "loss": 1.2664,
+ "step": 535
+ },
+ {
+ "epoch": 0.5361507924103655,
+ "grad_norm": 0.6555044054985046,
+ "learning_rate": 0.00018462657743615888,
+ "loss": 1.464,
+ "step": 536
+ },
+ {
+ "epoch": 0.5371510737394892,
+ "grad_norm": 0.5468676686286926,
+ "learning_rate": 0.00018457068450815562,
+ "loss": 1.3733,
+ "step": 537
+ },
+ {
+ "epoch": 0.538151355068613,
+ "grad_norm": 0.5662835836410522,
+ "learning_rate": 0.00018451469865226464,
+ "loss": 1.509,
+ "step": 538
+ },
+ {
+ "epoch": 0.5391516363977369,
+ "grad_norm": 0.5553548336029053,
+ "learning_rate": 0.00018445861993000436,
+ "loss": 1.2476,
+ "step": 539
+ },
+ {
+ "epoch": 0.5401519177268607,
+ "grad_norm": 0.6240925192832947,
+ "learning_rate": 0.00018440244840299506,
+ "loss": 1.5835,
+ "step": 540
+ },
+ {
+ "epoch": 0.5411521990559846,
+ "grad_norm": 0.6107541918754578,
+ "learning_rate": 0.0001843461841329591,
+ "loss": 1.7176,
+ "step": 541
+ },
+ {
+ "epoch": 0.5421524803851083,
+ "grad_norm": 0.6990326642990112,
+ "learning_rate": 0.0001842898271817208,
+ "loss": 1.4235,
+ "step": 542
+ },
+ {
+ "epoch": 0.5431527617142321,
+ "grad_norm": 0.583871603012085,
+ "learning_rate": 0.00018423337761120618,
+ "loss": 1.5283,
+ "step": 543
+ },
+ {
+ "epoch": 0.544153043043356,
+ "grad_norm": 0.5585455894470215,
+ "learning_rate": 0.00018417683548344318,
+ "loss": 1.4875,
+ "step": 544
+ },
+ {
+ "epoch": 0.5451533243724798,
+ "grad_norm": 0.5199955701828003,
+ "learning_rate": 0.00018412020086056133,
+ "loss": 1.3989,
+ "step": 545
+ },
+ {
+ "epoch": 0.5461536057016035,
+ "grad_norm": 0.5517343878746033,
+ "learning_rate": 0.0001840634738047918,
+ "loss": 1.4073,
+ "step": 546
+ },
+ {
+ "epoch": 0.5471538870307274,
+ "grad_norm": 0.7140716314315796,
+ "learning_rate": 0.0001840066543784675,
+ "loss": 1.4477,
+ "step": 547
+ },
+ {
+ "epoch": 0.5481541683598512,
+ "grad_norm": 0.548422634601593,
+ "learning_rate": 0.00018394974264402257,
+ "loss": 1.4198,
+ "step": 548
+ },
+ {
+ "epoch": 0.549154449688975,
+ "grad_norm": 0.5907624363899231,
+ "learning_rate": 0.00018389273866399275,
+ "loss": 1.4033,
+ "step": 549
+ },
+ {
+ "epoch": 0.5501547310180989,
+ "grad_norm": 0.5327603220939636,
+ "learning_rate": 0.00018383564250101512,
+ "loss": 1.2674,
+ "step": 550
+ },
+ {
+ "epoch": 0.5511550123472226,
+ "grad_norm": 0.4678132236003876,
+ "learning_rate": 0.000183778454217828,
+ "loss": 1.3644,
+ "step": 551
+ },
+ {
+ "epoch": 0.5521552936763465,
+ "grad_norm": 0.674040675163269,
+ "learning_rate": 0.0001837211738772711,
+ "loss": 1.6942,
+ "step": 552
+ },
+ {
+ "epoch": 0.5531555750054703,
+ "grad_norm": 0.5374539494514465,
+ "learning_rate": 0.000183663801542285,
+ "loss": 1.1887,
+ "step": 553
+ },
+ {
+ "epoch": 0.5541558563345941,
+ "grad_norm": 0.5528072118759155,
+ "learning_rate": 0.00018360633727591155,
+ "loss": 1.2,
+ "step": 554
+ },
+ {
+ "epoch": 0.555156137663718,
+ "grad_norm": 0.6597411632537842,
+ "learning_rate": 0.00018354878114129367,
+ "loss": 1.402,
+ "step": 555
+ },
+ {
+ "epoch": 0.5561564189928417,
+ "grad_norm": 0.5931501388549805,
+ "learning_rate": 0.00018349113320167504,
+ "loss": 1.5583,
+ "step": 556
+ },
+ {
+ "epoch": 0.5571567003219655,
+ "grad_norm": 0.6331121921539307,
+ "learning_rate": 0.00018343339352040042,
+ "loss": 1.7882,
+ "step": 557
+ },
+ {
+ "epoch": 0.5581569816510894,
+ "grad_norm": 0.5221824645996094,
+ "learning_rate": 0.00018337556216091517,
+ "loss": 1.2457,
+ "step": 558
+ },
+ {
+ "epoch": 0.5591572629802132,
+ "grad_norm": 0.6008853912353516,
+ "learning_rate": 0.00018331763918676556,
+ "loss": 1.5916,
+ "step": 559
+ },
+ {
+ "epoch": 0.560157544309337,
+ "grad_norm": 0.5409006476402283,
+ "learning_rate": 0.00018325962466159848,
+ "loss": 1.3457,
+ "step": 560
+ },
+ {
+ "epoch": 0.5611578256384608,
+ "grad_norm": 0.5095859169960022,
+ "learning_rate": 0.00018320151864916135,
+ "loss": 1.3622,
+ "step": 561
+ },
+ {
+ "epoch": 0.5621581069675846,
+ "grad_norm": 0.5716331005096436,
+ "learning_rate": 0.00018314332121330225,
+ "loss": 1.6168,
+ "step": 562
+ },
+ {
+ "epoch": 0.5631583882967085,
+ "grad_norm": 0.600307047367096,
+ "learning_rate": 0.0001830850324179695,
+ "loss": 1.4117,
+ "step": 563
+ },
+ {
+ "epoch": 0.5641586696258323,
+ "grad_norm": 0.7528484463691711,
+ "learning_rate": 0.00018302665232721208,
+ "loss": 1.3418,
+ "step": 564
+ },
+ {
+ "epoch": 0.565158950954956,
+ "grad_norm": 0.6119087338447571,
+ "learning_rate": 0.0001829681810051791,
+ "loss": 1.4908,
+ "step": 565
+ },
+ {
+ "epoch": 0.5661592322840799,
+ "grad_norm": 0.6440190672874451,
+ "learning_rate": 0.00018290961851611995,
+ "loss": 1.3511,
+ "step": 566
+ },
+ {
+ "epoch": 0.5671595136132037,
+ "grad_norm": 0.647294282913208,
+ "learning_rate": 0.00018285096492438424,
+ "loss": 1.5165,
+ "step": 567
+ },
+ {
+ "epoch": 0.5681597949423275,
+ "grad_norm": 0.5499668717384338,
+ "learning_rate": 0.00018279222029442163,
+ "loss": 1.2876,
+ "step": 568
+ },
+ {
+ "epoch": 0.5691600762714514,
+ "grad_norm": 0.5629482865333557,
+ "learning_rate": 0.00018273338469078186,
+ "loss": 1.2256,
+ "step": 569
+ },
+ {
+ "epoch": 0.5701603576005752,
+ "grad_norm": 0.48661288619041443,
+ "learning_rate": 0.00018267445817811466,
+ "loss": 1.44,
+ "step": 570
+ },
+ {
+ "epoch": 0.5711606389296989,
+ "grad_norm": 0.5713567733764648,
+ "learning_rate": 0.00018261544082116954,
+ "loss": 1.741,
+ "step": 571
+ },
+ {
+ "epoch": 0.5721609202588228,
+ "grad_norm": 0.6130850315093994,
+ "learning_rate": 0.00018255633268479595,
+ "loss": 1.526,
+ "step": 572
+ },
+ {
+ "epoch": 0.5731612015879466,
+ "grad_norm": 0.5415536761283875,
+ "learning_rate": 0.00018249713383394303,
+ "loss": 1.2405,
+ "step": 573
+ },
+ {
+ "epoch": 0.5741614829170705,
+ "grad_norm": 0.600574791431427,
+ "learning_rate": 0.0001824378443336596,
+ "loss": 1.4534,
+ "step": 574
+ },
+ {
+ "epoch": 0.5751617642461943,
+ "grad_norm": 0.5479387044906616,
+ "learning_rate": 0.00018237846424909413,
+ "loss": 1.4277,
+ "step": 575
+ },
+ {
+ "epoch": 0.576162045575318,
+ "grad_norm": 0.5536132454872131,
+ "learning_rate": 0.00018231899364549455,
+ "loss": 1.3918,
+ "step": 576
+ },
+ {
+ "epoch": 0.5771623269044419,
+ "grad_norm": 0.6228598356246948,
+ "learning_rate": 0.00018225943258820833,
+ "loss": 1.413,
+ "step": 577
+ },
+ {
+ "epoch": 0.5781626082335657,
+ "grad_norm": 0.5498123168945312,
+ "learning_rate": 0.00018219978114268227,
+ "loss": 1.3558,
+ "step": 578
+ },
+ {
+ "epoch": 0.5791628895626895,
+ "grad_norm": 0.5427498817443848,
+ "learning_rate": 0.00018214003937446253,
+ "loss": 1.509,
+ "step": 579
+ },
+ {
+ "epoch": 0.5801631708918134,
+ "grad_norm": 0.522285521030426,
+ "learning_rate": 0.00018208020734919455,
+ "loss": 1.3847,
+ "step": 580
+ },
+ {
+ "epoch": 0.5811634522209371,
+ "grad_norm": 0.5963860750198364,
+ "learning_rate": 0.00018202028513262288,
+ "loss": 1.4605,
+ "step": 581
+ },
+ {
+ "epoch": 0.5821637335500609,
+ "grad_norm": 0.4854499101638794,
+ "learning_rate": 0.00018196027279059117,
+ "loss": 1.4968,
+ "step": 582
+ },
+ {
+ "epoch": 0.5831640148791848,
+ "grad_norm": 0.503466010093689,
+ "learning_rate": 0.00018190017038904215,
+ "loss": 1.2568,
+ "step": 583
+ },
+ {
+ "epoch": 0.5841642962083086,
+ "grad_norm": 0.6027483940124512,
+ "learning_rate": 0.0001818399779940175,
+ "loss": 1.5744,
+ "step": 584
+ },
+ {
+ "epoch": 0.5851645775374325,
+ "grad_norm": 0.5450258851051331,
+ "learning_rate": 0.0001817796956716578,
+ "loss": 1.2672,
+ "step": 585
+ },
+ {
+ "epoch": 0.5861648588665562,
+ "grad_norm": 0.5376724600791931,
+ "learning_rate": 0.00018171932348820234,
+ "loss": 1.5099,
+ "step": 586
+ },
+ {
+ "epoch": 0.58716514019568,
+ "grad_norm": 0.513921856880188,
+ "learning_rate": 0.0001816588615099893,
+ "loss": 1.3213,
+ "step": 587
+ },
+ {
+ "epoch": 0.5881654215248039,
+ "grad_norm": 0.7540159225463867,
+ "learning_rate": 0.00018159830980345548,
+ "loss": 1.2231,
+ "step": 588
+ },
+ {
+ "epoch": 0.5891657028539277,
+ "grad_norm": 0.5917702317237854,
+ "learning_rate": 0.0001815376684351362,
+ "loss": 1.6094,
+ "step": 589
+ },
+ {
+ "epoch": 0.5901659841830514,
+ "grad_norm": 0.5507463216781616,
+ "learning_rate": 0.00018147693747166534,
+ "loss": 1.3904,
+ "step": 590
+ },
+ {
+ "epoch": 0.5911662655121753,
+ "grad_norm": 0.545695960521698,
+ "learning_rate": 0.00018141611697977529,
+ "loss": 1.5172,
+ "step": 591
+ },
+ {
+ "epoch": 0.5921665468412991,
+ "grad_norm": 0.5876530408859253,
+ "learning_rate": 0.00018135520702629675,
+ "loss": 1.3676,
+ "step": 592
+ },
+ {
+ "epoch": 0.5931668281704229,
+ "grad_norm": 0.5510894060134888,
+ "learning_rate": 0.0001812942076781588,
+ "loss": 1.4379,
+ "step": 593
+ },
+ {
+ "epoch": 0.5941671094995468,
+ "grad_norm": 0.5105913877487183,
+ "learning_rate": 0.0001812331190023886,
+ "loss": 1.3687,
+ "step": 594
+ },
+ {
+ "epoch": 0.5951673908286705,
+ "grad_norm": 0.47876060009002686,
+ "learning_rate": 0.0001811719410661116,
+ "loss": 1.3178,
+ "step": 595
+ },
+ {
+ "epoch": 0.5961676721577944,
+ "grad_norm": 0.6079074144363403,
+ "learning_rate": 0.00018111067393655132,
+ "loss": 1.4713,
+ "step": 596
+ },
+ {
+ "epoch": 0.5971679534869182,
+ "grad_norm": 0.5363487601280212,
+ "learning_rate": 0.0001810493176810292,
+ "loss": 1.1868,
+ "step": 597
+ },
+ {
+ "epoch": 0.598168234816042,
+ "grad_norm": 0.5252292156219482,
+ "learning_rate": 0.00018098787236696474,
+ "loss": 1.303,
+ "step": 598
+ },
+ {
+ "epoch": 0.5991685161451659,
+ "grad_norm": 0.5377137064933777,
+ "learning_rate": 0.00018092633806187513,
+ "loss": 1.3653,
+ "step": 599
+ },
+ {
+ "epoch": 0.6001687974742896,
+ "grad_norm": 0.5274302363395691,
+ "learning_rate": 0.0001808647148333755,
+ "loss": 1.3693,
+ "step": 600
+ },
+ {
+ "epoch": 0.6011690788034134,
+ "grad_norm": 0.5664658546447754,
+ "learning_rate": 0.00018080300274917862,
+ "loss": 1.3807,
+ "step": 601
+ },
+ {
+ "epoch": 0.6021693601325373,
+ "grad_norm": 0.6609538197517395,
+ "learning_rate": 0.00018074120187709495,
+ "loss": 1.5015,
+ "step": 602
+ },
+ {
+ "epoch": 0.6031696414616611,
+ "grad_norm": 0.4943195879459381,
+ "learning_rate": 0.00018067931228503246,
+ "loss": 1.4436,
+ "step": 603
+ },
+ {
+ "epoch": 0.604169922790785,
+ "grad_norm": 0.549712598323822,
+ "learning_rate": 0.00018061733404099655,
+ "loss": 1.455,
+ "step": 604
+ },
+ {
+ "epoch": 0.6051702041199087,
+ "grad_norm": 0.5765941143035889,
+ "learning_rate": 0.00018055526721309016,
+ "loss": 1.3317,
+ "step": 605
+ },
+ {
+ "epoch": 0.6061704854490325,
+ "grad_norm": 0.5223068594932556,
+ "learning_rate": 0.0001804931118695135,
+ "loss": 1.3456,
+ "step": 606
+ },
+ {
+ "epoch": 0.6071707667781564,
+ "grad_norm": 0.5385129451751709,
+ "learning_rate": 0.00018043086807856403,
+ "loss": 1.3388,
+ "step": 607
+ },
+ {
+ "epoch": 0.6081710481072802,
+ "grad_norm": 0.5244528651237488,
+ "learning_rate": 0.00018036853590863648,
+ "loss": 1.398,
+ "step": 608
+ },
+ {
+ "epoch": 0.609171329436404,
+ "grad_norm": 0.5274112224578857,
+ "learning_rate": 0.00018030611542822257,
+ "loss": 1.3105,
+ "step": 609
+ },
+ {
+ "epoch": 0.6101716107655278,
+ "grad_norm": 0.5351893305778503,
+ "learning_rate": 0.00018024360670591114,
+ "loss": 1.3128,
+ "step": 610
+ },
+ {
+ "epoch": 0.6111718920946516,
+ "grad_norm": 0.5729460120201111,
+ "learning_rate": 0.00018018100981038798,
+ "loss": 1.3606,
+ "step": 611
+ },
+ {
+ "epoch": 0.6121721734237754,
+ "grad_norm": 0.5494408011436462,
+ "learning_rate": 0.00018011832481043576,
+ "loss": 1.4517,
+ "step": 612
+ },
+ {
+ "epoch": 0.6131724547528993,
+ "grad_norm": 0.5205882787704468,
+ "learning_rate": 0.00018005555177493394,
+ "loss": 1.4943,
+ "step": 613
+ },
+ {
+ "epoch": 0.614172736082023,
+ "grad_norm": 0.5488479137420654,
+ "learning_rate": 0.00017999269077285875,
+ "loss": 1.3939,
+ "step": 614
+ },
+ {
+ "epoch": 0.6151730174111469,
+ "grad_norm": 0.5779786109924316,
+ "learning_rate": 0.00017992974187328305,
+ "loss": 1.5744,
+ "step": 615
+ },
+ {
+ "epoch": 0.6161732987402707,
+ "grad_norm": 0.5576769113540649,
+ "learning_rate": 0.00017986670514537627,
+ "loss": 1.2284,
+ "step": 616
+ },
+ {
+ "epoch": 0.6171735800693945,
+ "grad_norm": 0.4912784993648529,
+ "learning_rate": 0.00017980358065840444,
+ "loss": 1.292,
+ "step": 617
+ },
+ {
+ "epoch": 0.6181738613985184,
+ "grad_norm": 0.657666027545929,
+ "learning_rate": 0.0001797403684817299,
+ "loss": 1.4918,
+ "step": 618
+ },
+ {
+ "epoch": 0.6191741427276422,
+ "grad_norm": 0.5642833113670349,
+ "learning_rate": 0.00017967706868481144,
+ "loss": 1.4718,
+ "step": 619
+ },
+ {
+ "epoch": 0.6201744240567659,
+ "grad_norm": 0.7243106961250305,
+ "learning_rate": 0.00017961368133720407,
+ "loss": 1.4342,
+ "step": 620
+ },
+ {
+ "epoch": 0.6211747053858898,
+ "grad_norm": 0.4982456564903259,
+ "learning_rate": 0.000179550206508559,
+ "loss": 1.4478,
+ "step": 621
+ },
+ {
+ "epoch": 0.6221749867150136,
+ "grad_norm": 0.5249592065811157,
+ "learning_rate": 0.00017948664426862364,
+ "loss": 1.485,
+ "step": 622
+ },
+ {
+ "epoch": 0.6231752680441374,
+ "grad_norm": 0.6167681217193604,
+ "learning_rate": 0.00017942299468724134,
+ "loss": 1.4813,
+ "step": 623
+ },
+ {
+ "epoch": 0.6241755493732613,
+ "grad_norm": 0.5300460457801819,
+ "learning_rate": 0.0001793592578343515,
+ "loss": 1.1364,
+ "step": 624
+ },
+ {
+ "epoch": 0.625175830702385,
+ "grad_norm": 0.5908417105674744,
+ "learning_rate": 0.0001792954337799894,
+ "loss": 1.4402,
+ "step": 625
+ },
+ {
+ "epoch": 0.6261761120315089,
+ "grad_norm": 0.5684035420417786,
+ "learning_rate": 0.00017923152259428612,
+ "loss": 1.4847,
+ "step": 626
+ },
+ {
+ "epoch": 0.6271763933606327,
+ "grad_norm": 0.5421493053436279,
+ "learning_rate": 0.00017916752434746856,
+ "loss": 1.3348,
+ "step": 627
+ },
+ {
+ "epoch": 0.6281766746897565,
+ "grad_norm": 0.5295160412788391,
+ "learning_rate": 0.0001791034391098591,
+ "loss": 1.4703,
+ "step": 628
+ },
+ {
+ "epoch": 0.6291769560188804,
+ "grad_norm": 0.5196051001548767,
+ "learning_rate": 0.00017903926695187595,
+ "loss": 1.3478,
+ "step": 629
+ },
+ {
+ "epoch": 0.6301772373480041,
+ "grad_norm": 0.4994469881057739,
+ "learning_rate": 0.0001789750079440326,
+ "loss": 1.2368,
+ "step": 630
+ },
+ {
+ "epoch": 0.6311775186771279,
+ "grad_norm": 0.5117055177688599,
+ "learning_rate": 0.00017891066215693817,
+ "loss": 1.3429,
+ "step": 631
+ },
+ {
+ "epoch": 0.6321778000062518,
+ "grad_norm": 0.49438026547431946,
+ "learning_rate": 0.00017884622966129695,
+ "loss": 1.301,
+ "step": 632
+ },
+ {
+ "epoch": 0.6331780813353756,
+ "grad_norm": 0.6113334894180298,
+ "learning_rate": 0.00017878171052790868,
+ "loss": 1.4636,
+ "step": 633
+ },
+ {
+ "epoch": 0.6341783626644993,
+ "grad_norm": 0.6063141822814941,
+ "learning_rate": 0.00017871710482766817,
+ "loss": 1.2262,
+ "step": 634
+ },
+ {
+ "epoch": 0.6351786439936232,
+ "grad_norm": 0.5604403614997864,
+ "learning_rate": 0.00017865241263156546,
+ "loss": 1.4112,
+ "step": 635
+ },
+ {
+ "epoch": 0.636178925322747,
+ "grad_norm": 0.523415207862854,
+ "learning_rate": 0.0001785876340106855,
+ "loss": 1.3281,
+ "step": 636
+ },
+ {
+ "epoch": 0.6371792066518709,
+ "grad_norm": 0.5602991580963135,
+ "learning_rate": 0.0001785227690362083,
+ "loss": 1.44,
+ "step": 637
+ },
+ {
+ "epoch": 0.6381794879809947,
+ "grad_norm": 0.46946853399276733,
+ "learning_rate": 0.00017845781777940878,
+ "loss": 1.2956,
+ "step": 638
+ },
+ {
+ "epoch": 0.6391797693101184,
+ "grad_norm": 0.5586503744125366,
+ "learning_rate": 0.00017839278031165658,
+ "loss": 1.5419,
+ "step": 639
+ },
+ {
+ "epoch": 0.6401800506392423,
+ "grad_norm": 0.5270752310752869,
+ "learning_rate": 0.00017832765670441612,
+ "loss": 1.305,
+ "step": 640
+ },
+ {
+ "epoch": 0.6411803319683661,
+ "grad_norm": 0.57756108045578,
+ "learning_rate": 0.0001782624470292465,
+ "loss": 1.2145,
+ "step": 641
+ },
+ {
+ "epoch": 0.6421806132974899,
+ "grad_norm": 0.5709058046340942,
+ "learning_rate": 0.0001781971513578013,
+ "loss": 1.4804,
+ "step": 642
+ },
+ {
+ "epoch": 0.6431808946266138,
+ "grad_norm": 0.505849301815033,
+ "learning_rate": 0.00017813176976182873,
+ "loss": 1.3964,
+ "step": 643
+ },
+ {
+ "epoch": 0.6441811759557375,
+ "grad_norm": 0.5171617269515991,
+ "learning_rate": 0.00017806630231317127,
+ "loss": 1.3283,
+ "step": 644
+ },
+ {
+ "epoch": 0.6451814572848613,
+ "grad_norm": 0.5567512512207031,
+ "learning_rate": 0.00017800074908376584,
+ "loss": 1.481,
+ "step": 645
+ },
+ {
+ "epoch": 0.6461817386139852,
+ "grad_norm": 0.5000666379928589,
+ "learning_rate": 0.00017793511014564358,
+ "loss": 1.2856,
+ "step": 646
+ },
+ {
+ "epoch": 0.647182019943109,
+ "grad_norm": 0.49550777673721313,
+ "learning_rate": 0.00017786938557092983,
+ "loss": 1.3447,
+ "step": 647
+ },
+ {
+ "epoch": 0.6481823012722329,
+ "grad_norm": 0.5904624462127686,
+ "learning_rate": 0.00017780357543184397,
+ "loss": 1.241,
+ "step": 648
+ },
+ {
+ "epoch": 0.6491825826013566,
+ "grad_norm": 0.4615901708602905,
+ "learning_rate": 0.00017773767980069945,
+ "loss": 1.3436,
+ "step": 649
+ },
+ {
+ "epoch": 0.6501828639304804,
+ "grad_norm": 0.48083069920539856,
+ "learning_rate": 0.0001776716987499037,
+ "loss": 1.3906,
+ "step": 650
+ },
+ {
+ "epoch": 0.6511831452596043,
+ "grad_norm": 0.4525931775569916,
+ "learning_rate": 0.0001776056323519579,
+ "loss": 1.3417,
+ "step": 651
+ },
+ {
+ "epoch": 0.6521834265887281,
+ "grad_norm": 0.6179555058479309,
+ "learning_rate": 0.00017753948067945712,
+ "loss": 1.3438,
+ "step": 652
+ },
+ {
+ "epoch": 0.6531837079178519,
+ "grad_norm": 0.5525293946266174,
+ "learning_rate": 0.00017747324380509006,
+ "loss": 1.4551,
+ "step": 653
+ },
+ {
+ "epoch": 0.6541839892469757,
+ "grad_norm": 0.533028781414032,
+ "learning_rate": 0.00017740692180163908,
+ "loss": 1.4396,
+ "step": 654
+ },
+ {
+ "epoch": 0.6551842705760995,
+ "grad_norm": 0.5196881890296936,
+ "learning_rate": 0.00017734051474198003,
+ "loss": 1.3032,
+ "step": 655
+ },
+ {
+ "epoch": 0.6561845519052233,
+ "grad_norm": 0.5190469622612,
+ "learning_rate": 0.0001772740226990823,
+ "loss": 1.4049,
+ "step": 656
+ },
+ {
+ "epoch": 0.6571848332343472,
+ "grad_norm": 0.49517175555229187,
+ "learning_rate": 0.00017720744574600863,
+ "loss": 1.3696,
+ "step": 657
+ },
+ {
+ "epoch": 0.658185114563471,
+ "grad_norm": 0.5165138244628906,
+ "learning_rate": 0.00017714078395591502,
+ "loss": 1.3667,
+ "step": 658
+ },
+ {
+ "epoch": 0.6591853958925948,
+ "grad_norm": 0.5624507665634155,
+ "learning_rate": 0.00017707403740205071,
+ "loss": 1.2109,
+ "step": 659
+ },
+ {
+ "epoch": 0.6601856772217186,
+ "grad_norm": 0.45942649245262146,
+ "learning_rate": 0.00017700720615775812,
+ "loss": 1.259,
+ "step": 660
+ },
+ {
+ "epoch": 0.6611859585508424,
+ "grad_norm": 0.5019019842147827,
+ "learning_rate": 0.0001769402902964727,
+ "loss": 1.3739,
+ "step": 661
+ },
+ {
+ "epoch": 0.6621862398799663,
+ "grad_norm": 0.4661652743816376,
+ "learning_rate": 0.00017687328989172288,
+ "loss": 1.2606,
+ "step": 662
+ },
+ {
+ "epoch": 0.66318652120909,
+ "grad_norm": 0.5310545563697815,
+ "learning_rate": 0.00017680620501712996,
+ "loss": 1.3406,
+ "step": 663
+ },
+ {
+ "epoch": 0.6641868025382138,
+ "grad_norm": 0.5190532207489014,
+ "learning_rate": 0.00017673903574640814,
+ "loss": 1.3052,
+ "step": 664
+ },
+ {
+ "epoch": 0.6651870838673377,
+ "grad_norm": 0.5265533328056335,
+ "learning_rate": 0.00017667178215336423,
+ "loss": 1.2326,
+ "step": 665
+ },
+ {
+ "epoch": 0.6661873651964615,
+ "grad_norm": 0.5971291065216064,
+ "learning_rate": 0.0001766044443118978,
+ "loss": 1.4291,
+ "step": 666
+ },
+ {
+ "epoch": 0.6671876465255854,
+ "grad_norm": 0.5295760631561279,
+ "learning_rate": 0.000176537022296001,
+ "loss": 1.2781,
+ "step": 667
+ },
+ {
+ "epoch": 0.6681879278547092,
+ "grad_norm": 0.5124595761299133,
+ "learning_rate": 0.00017646951617975837,
+ "loss": 1.318,
+ "step": 668
+ },
+ {
+ "epoch": 0.6691882091838329,
+ "grad_norm": 0.5968078970909119,
+ "learning_rate": 0.00017640192603734692,
+ "loss": 1.1483,
+ "step": 669
+ },
+ {
+ "epoch": 0.6701884905129568,
+ "grad_norm": 0.6211404204368591,
+ "learning_rate": 0.00017633425194303606,
+ "loss": 1.1164,
+ "step": 670
+ },
+ {
+ "epoch": 0.6711887718420806,
+ "grad_norm": 0.5539883375167847,
+ "learning_rate": 0.00017626649397118734,
+ "loss": 1.453,
+ "step": 671
+ },
+ {
+ "epoch": 0.6721890531712044,
+ "grad_norm": 0.5188294649124146,
+ "learning_rate": 0.00017619865219625452,
+ "loss": 1.5201,
+ "step": 672
+ },
+ {
+ "epoch": 0.6731893345003283,
+ "grad_norm": 0.531973659992218,
+ "learning_rate": 0.00017613072669278343,
+ "loss": 1.3176,
+ "step": 673
+ },
+ {
+ "epoch": 0.674189615829452,
+ "grad_norm": 0.5878707766532898,
+ "learning_rate": 0.00017606271753541192,
+ "loss": 1.5326,
+ "step": 674
+ },
+ {
+ "epoch": 0.6751898971585758,
+ "grad_norm": 0.595443844795227,
+ "learning_rate": 0.00017599462479886974,
+ "loss": 1.4033,
+ "step": 675
+ },
+ {
+ "epoch": 0.6761901784876997,
+ "grad_norm": 0.5093846321105957,
+ "learning_rate": 0.00017592644855797854,
+ "loss": 1.2995,
+ "step": 676
+ },
+ {
+ "epoch": 0.6771904598168235,
+ "grad_norm": 0.5521978735923767,
+ "learning_rate": 0.00017585818888765168,
+ "loss": 1.2912,
+ "step": 677
+ },
+ {
+ "epoch": 0.6781907411459474,
+ "grad_norm": 0.4612530469894409,
+ "learning_rate": 0.0001757898458628941,
+ "loss": 1.1902,
+ "step": 678
+ },
+ {
+ "epoch": 0.6791910224750711,
+ "grad_norm": 0.4973600506782532,
+ "learning_rate": 0.00017572141955880252,
+ "loss": 1.3547,
+ "step": 679
+ },
+ {
+ "epoch": 0.6801913038041949,
+ "grad_norm": 0.606407105922699,
+ "learning_rate": 0.00017565291005056504,
+ "loss": 1.371,
+ "step": 680
+ },
+ {
+ "epoch": 0.6811915851333188,
+ "grad_norm": 0.5027814507484436,
+ "learning_rate": 0.00017558431741346122,
+ "loss": 1.4551,
+ "step": 681
+ },
+ {
+ "epoch": 0.6821918664624426,
+ "grad_norm": 0.5732039213180542,
+ "learning_rate": 0.00017551564172286197,
+ "loss": 1.4181,
+ "step": 682
+ },
+ {
+ "epoch": 0.6831921477915663,
+ "grad_norm": 0.6327995657920837,
+ "learning_rate": 0.00017544688305422943,
+ "loss": 1.237,
+ "step": 683
+ },
+ {
+ "epoch": 0.6841924291206902,
+ "grad_norm": 0.5779625177383423,
+ "learning_rate": 0.00017537804148311695,
+ "loss": 1.5356,
+ "step": 684
+ },
+ {
+ "epoch": 0.685192710449814,
+ "grad_norm": 0.6031951308250427,
+ "learning_rate": 0.00017530911708516902,
+ "loss": 1.3776,
+ "step": 685
+ },
+ {
+ "epoch": 0.6861929917789378,
+ "grad_norm": 0.4811258018016815,
+ "learning_rate": 0.00017524010993612098,
+ "loss": 1.185,
+ "step": 686
+ },
+ {
+ "epoch": 0.6871932731080617,
+ "grad_norm": 0.5048002600669861,
+ "learning_rate": 0.00017517102011179933,
+ "loss": 1.3335,
+ "step": 687
+ },
+ {
+ "epoch": 0.6881935544371854,
+ "grad_norm": 0.5963343977928162,
+ "learning_rate": 0.0001751018476881212,
+ "loss": 1.4326,
+ "step": 688
+ },
+ {
+ "epoch": 0.6891938357663093,
+ "grad_norm": 0.4770168960094452,
+ "learning_rate": 0.00017503259274109464,
+ "loss": 1.4664,
+ "step": 689
+ },
+ {
+ "epoch": 0.6901941170954331,
+ "grad_norm": 0.5020537376403809,
+ "learning_rate": 0.00017496325534681825,
+ "loss": 1.349,
+ "step": 690
+ },
+ {
+ "epoch": 0.6911943984245569,
+ "grad_norm": 0.5567785501480103,
+ "learning_rate": 0.00017489383558148136,
+ "loss": 1.452,
+ "step": 691
+ },
+ {
+ "epoch": 0.6921946797536808,
+ "grad_norm": 0.5167350769042969,
+ "learning_rate": 0.00017482433352136365,
+ "loss": 1.1148,
+ "step": 692
+ },
+ {
+ "epoch": 0.6931949610828045,
+ "grad_norm": 0.6030716300010681,
+ "learning_rate": 0.00017475474924283536,
+ "loss": 1.3473,
+ "step": 693
+ },
+ {
+ "epoch": 0.6941952424119283,
+ "grad_norm": 0.5643062591552734,
+ "learning_rate": 0.00017468508282235704,
+ "loss": 1.3476,
+ "step": 694
+ },
+ {
+ "epoch": 0.6951955237410522,
+ "grad_norm": 0.5124102234840393,
+ "learning_rate": 0.00017461533433647946,
+ "loss": 1.339,
+ "step": 695
+ },
+ {
+ "epoch": 0.696195805070176,
+ "grad_norm": 0.5690215229988098,
+ "learning_rate": 0.00017454550386184362,
+ "loss": 1.3816,
+ "step": 696
+ },
+ {
+ "epoch": 0.6971960863992998,
+ "grad_norm": 0.5938367247581482,
+ "learning_rate": 0.00017447559147518055,
+ "loss": 1.4554,
+ "step": 697
+ },
+ {
+ "epoch": 0.6981963677284236,
+ "grad_norm": 0.5288996696472168,
+ "learning_rate": 0.00017440559725331135,
+ "loss": 1.2904,
+ "step": 698
+ },
+ {
+ "epoch": 0.6991966490575474,
+ "grad_norm": 0.5047140121459961,
+ "learning_rate": 0.000174335521273147,
+ "loss": 1.2362,
+ "step": 699
+ },
+ {
+ "epoch": 0.7001969303866713,
+ "grad_norm": 0.5563321709632874,
+ "learning_rate": 0.00017426536361168834,
+ "loss": 1.2863,
+ "step": 700
+ },
+ {
+ "epoch": 0.7011972117157951,
+ "grad_norm": 0.48857688903808594,
+ "learning_rate": 0.00017419512434602594,
+ "loss": 1.3387,
+ "step": 701
+ },
+ {
+ "epoch": 0.7021974930449189,
+ "grad_norm": 0.5205016732215881,
+ "learning_rate": 0.00017412480355334005,
+ "loss": 1.3874,
+ "step": 702
+ },
+ {
+ "epoch": 0.7031977743740427,
+ "grad_norm": 0.5850381851196289,
+ "learning_rate": 0.00017405440131090048,
+ "loss": 1.5369,
+ "step": 703
+ },
+ {
+ "epoch": 0.7041980557031665,
+ "grad_norm": 0.5708681344985962,
+ "learning_rate": 0.00017398391769606658,
+ "loss": 1.3622,
+ "step": 704
+ },
+ {
+ "epoch": 0.7051983370322903,
+ "grad_norm": 0.5743641257286072,
+ "learning_rate": 0.00017391335278628712,
+ "loss": 1.2946,
+ "step": 705
+ },
+ {
+ "epoch": 0.7061986183614142,
+ "grad_norm": 0.5376024842262268,
+ "learning_rate": 0.00017384270665910014,
+ "loss": 1.2952,
+ "step": 706
+ },
+ {
+ "epoch": 0.707198899690538,
+ "grad_norm": 0.6123641133308411,
+ "learning_rate": 0.000173771979392133,
+ "loss": 1.4239,
+ "step": 707
+ },
+ {
+ "epoch": 0.7081991810196617,
+ "grad_norm": 0.5639240741729736,
+ "learning_rate": 0.00017370117106310214,
+ "loss": 1.3627,
+ "step": 708
+ },
+ {
+ "epoch": 0.7091994623487856,
+ "grad_norm": 0.5551653504371643,
+ "learning_rate": 0.0001736302817498131,
+ "loss": 1.3435,
+ "step": 709
+ },
+ {
+ "epoch": 0.7101997436779094,
+ "grad_norm": 0.4746958911418915,
+ "learning_rate": 0.00017355931153016044,
+ "loss": 1.2402,
+ "step": 710
+ },
+ {
+ "epoch": 0.7112000250070333,
+ "grad_norm": 0.4722553491592407,
+ "learning_rate": 0.0001734882604821276,
+ "loss": 1.3962,
+ "step": 711
+ },
+ {
+ "epoch": 0.712200306336157,
+ "grad_norm": 0.5038101077079773,
+ "learning_rate": 0.0001734171286837868,
+ "loss": 1.3261,
+ "step": 712
+ },
+ {
+ "epoch": 0.7132005876652808,
+ "grad_norm": 0.5004639625549316,
+ "learning_rate": 0.00017334591621329906,
+ "loss": 1.4943,
+ "step": 713
+ },
+ {
+ "epoch": 0.7142008689944047,
+ "grad_norm": 0.5141516327857971,
+ "learning_rate": 0.00017327462314891402,
+ "loss": 1.2754,
+ "step": 714
+ },
+ {
+ "epoch": 0.7152011503235285,
+ "grad_norm": 0.5491873025894165,
+ "learning_rate": 0.00017320324956896977,
+ "loss": 1.3052,
+ "step": 715
+ },
+ {
+ "epoch": 0.7162014316526523,
+ "grad_norm": 0.49937358498573303,
+ "learning_rate": 0.00017313179555189306,
+ "loss": 1.2277,
+ "step": 716
+ },
+ {
+ "epoch": 0.7172017129817762,
+ "grad_norm": 0.6419594287872314,
+ "learning_rate": 0.00017306026117619889,
+ "loss": 1.4844,
+ "step": 717
+ },
+ {
+ "epoch": 0.7182019943108999,
+ "grad_norm": 0.521108090877533,
+ "learning_rate": 0.0001729886465204906,
+ "loss": 1.2917,
+ "step": 718
+ },
+ {
+ "epoch": 0.7192022756400237,
+ "grad_norm": 0.532421886920929,
+ "learning_rate": 0.0001729169516634598,
+ "loss": 1.4555,
+ "step": 719
+ },
+ {
+ "epoch": 0.7202025569691476,
+ "grad_norm": 0.5168073177337646,
+ "learning_rate": 0.0001728451766838861,
+ "loss": 1.2116,
+ "step": 720
+ },
+ {
+ "epoch": 0.7212028382982714,
+ "grad_norm": 0.5593972206115723,
+ "learning_rate": 0.00017277332166063726,
+ "loss": 1.4345,
+ "step": 721
+ },
+ {
+ "epoch": 0.7222031196273953,
+ "grad_norm": 0.5317432284355164,
+ "learning_rate": 0.00017270138667266894,
+ "loss": 1.2987,
+ "step": 722
+ },
+ {
+ "epoch": 0.723203400956519,
+ "grad_norm": 0.6262248158454895,
+ "learning_rate": 0.00017262937179902472,
+ "loss": 1.2591,
+ "step": 723
+ },
+ {
+ "epoch": 0.7242036822856428,
+ "grad_norm": 0.5377100110054016,
+ "learning_rate": 0.00017255727711883588,
+ "loss": 1.366,
+ "step": 724
+ },
+ {
+ "epoch": 0.7252039636147667,
+ "grad_norm": 0.5637168288230896,
+ "learning_rate": 0.00017248510271132144,
+ "loss": 1.4593,
+ "step": 725
+ },
+ {
+ "epoch": 0.7262042449438905,
+ "grad_norm": 0.5360320210456848,
+ "learning_rate": 0.00017241284865578802,
+ "loss": 1.4797,
+ "step": 726
+ },
+ {
+ "epoch": 0.7272045262730142,
+ "grad_norm": 0.48500168323516846,
+ "learning_rate": 0.00017234051503162978,
+ "loss": 1.3875,
+ "step": 727
+ },
+ {
+ "epoch": 0.7282048076021381,
+ "grad_norm": 0.5666176080703735,
+ "learning_rate": 0.0001722681019183283,
+ "loss": 1.4683,
+ "step": 728
+ },
+ {
+ "epoch": 0.7292050889312619,
+ "grad_norm": 0.5710940361022949,
+ "learning_rate": 0.00017219560939545246,
+ "loss": 1.5538,
+ "step": 729
+ },
+ {
+ "epoch": 0.7302053702603858,
+ "grad_norm": 0.5658044219017029,
+ "learning_rate": 0.00017212303754265843,
+ "loss": 1.248,
+ "step": 730
+ },
+ {
+ "epoch": 0.7312056515895096,
+ "grad_norm": 0.5355331301689148,
+ "learning_rate": 0.0001720503864396896,
+ "loss": 1.259,
+ "step": 731
+ },
+ {
+ "epoch": 0.7322059329186333,
+ "grad_norm": 0.5683363676071167,
+ "learning_rate": 0.00017197765616637636,
+ "loss": 1.4242,
+ "step": 732
+ },
+ {
+ "epoch": 0.7332062142477572,
+ "grad_norm": 0.488972932100296,
+ "learning_rate": 0.0001719048468026361,
+ "loss": 1.3442,
+ "step": 733
+ },
+ {
+ "epoch": 0.734206495576881,
+ "grad_norm": 0.45563748478889465,
+ "learning_rate": 0.00017183195842847322,
+ "loss": 1.3236,
+ "step": 734
+ },
+ {
+ "epoch": 0.7352067769060048,
+ "grad_norm": 0.5114185214042664,
+ "learning_rate": 0.0001717589911239788,
+ "loss": 1.3071,
+ "step": 735
+ },
+ {
+ "epoch": 0.7362070582351287,
+ "grad_norm": 0.558686375617981,
+ "learning_rate": 0.00017168594496933074,
+ "loss": 1.2889,
+ "step": 736
+ },
+ {
+ "epoch": 0.7372073395642524,
+ "grad_norm": 0.49099281430244446,
+ "learning_rate": 0.00017161282004479351,
+ "loss": 1.1701,
+ "step": 737
+ },
+ {
+ "epoch": 0.7382076208933762,
+ "grad_norm": 0.549524188041687,
+ "learning_rate": 0.0001715396164307182,
+ "loss": 1.2853,
+ "step": 738
+ },
+ {
+ "epoch": 0.7392079022225001,
+ "grad_norm": 0.5683863162994385,
+ "learning_rate": 0.0001714663342075424,
+ "loss": 1.4201,
+ "step": 739
+ },
+ {
+ "epoch": 0.7402081835516239,
+ "grad_norm": 0.5957104563713074,
+ "learning_rate": 0.00017139297345578994,
+ "loss": 1.3406,
+ "step": 740
+ },
+ {
+ "epoch": 0.7412084648807478,
+ "grad_norm": 0.4645147919654846,
+ "learning_rate": 0.00017131953425607104,
+ "loss": 1.2344,
+ "step": 741
+ },
+ {
+ "epoch": 0.7422087462098715,
+ "grad_norm": 0.4981783330440521,
+ "learning_rate": 0.00017124601668908212,
+ "loss": 1.422,
+ "step": 742
+ },
+ {
+ "epoch": 0.7432090275389953,
+ "grad_norm": 0.5426530838012695,
+ "learning_rate": 0.00017117242083560568,
+ "loss": 1.4275,
+ "step": 743
+ },
+ {
+ "epoch": 0.7442093088681192,
+ "grad_norm": 0.5585354566574097,
+ "learning_rate": 0.00017109874677651024,
+ "loss": 1.5049,
+ "step": 744
+ },
+ {
+ "epoch": 0.745209590197243,
+ "grad_norm": 0.5639151930809021,
+ "learning_rate": 0.0001710249945927503,
+ "loss": 1.4019,
+ "step": 745
+ },
+ {
+ "epoch": 0.7462098715263668,
+ "grad_norm": 0.8334717750549316,
+ "learning_rate": 0.00017095116436536612,
+ "loss": 1.5607,
+ "step": 746
+ },
+ {
+ "epoch": 0.7472101528554906,
+ "grad_norm": 0.513970673084259,
+ "learning_rate": 0.00017087725617548385,
+ "loss": 1.1967,
+ "step": 747
+ },
+ {
+ "epoch": 0.7482104341846144,
+ "grad_norm": 0.6200702786445618,
+ "learning_rate": 0.00017080327010431513,
+ "loss": 1.2298,
+ "step": 748
+ },
+ {
+ "epoch": 0.7492107155137382,
+ "grad_norm": 0.54522305727005,
+ "learning_rate": 0.00017072920623315734,
+ "loss": 1.3214,
+ "step": 749
+ },
+ {
+ "epoch": 0.7502109968428621,
+ "grad_norm": 0.6682360172271729,
+ "learning_rate": 0.00017065506464339326,
+ "loss": 1.4631,
+ "step": 750
+ },
+ {
+ "epoch": 0.7512112781719859,
+ "grad_norm": 0.5061535239219666,
+ "learning_rate": 0.00017058084541649106,
+ "loss": 1.5062,
+ "step": 751
+ },
+ {
+ "epoch": 0.7522115595011097,
+ "grad_norm": 0.5790627598762512,
+ "learning_rate": 0.00017050654863400429,
+ "loss": 1.1371,
+ "step": 752
+ },
+ {
+ "epoch": 0.7532118408302335,
+ "grad_norm": 0.6058077216148376,
+ "learning_rate": 0.00017043217437757164,
+ "loss": 1.2185,
+ "step": 753
+ },
+ {
+ "epoch": 0.7542121221593573,
+ "grad_norm": 0.5494515895843506,
+ "learning_rate": 0.00017035772272891702,
+ "loss": 1.2468,
+ "step": 754
+ },
+ {
+ "epoch": 0.7552124034884812,
+ "grad_norm": 0.5687912106513977,
+ "learning_rate": 0.00017028319376984928,
+ "loss": 1.5621,
+ "step": 755
+ },
+ {
+ "epoch": 0.756212684817605,
+ "grad_norm": 0.5341185927391052,
+ "learning_rate": 0.00017020858758226229,
+ "loss": 1.3598,
+ "step": 756
+ },
+ {
+ "epoch": 0.7572129661467287,
+ "grad_norm": 0.5373026132583618,
+ "learning_rate": 0.0001701339042481347,
+ "loss": 1.4185,
+ "step": 757
+ },
+ {
+ "epoch": 0.7582132474758526,
+ "grad_norm": 0.46508973836898804,
+ "learning_rate": 0.00017005914384953007,
+ "loss": 1.2962,
+ "step": 758
+ },
+ {
+ "epoch": 0.7592135288049764,
+ "grad_norm": 0.4580937325954437,
+ "learning_rate": 0.00016998430646859654,
+ "loss": 1.0707,
+ "step": 759
+ },
+ {
+ "epoch": 0.7602138101341002,
+ "grad_norm": 0.5277093052864075,
+ "learning_rate": 0.00016990939218756683,
+ "loss": 1.2529,
+ "step": 760
+ },
+ {
+ "epoch": 0.761214091463224,
+ "grad_norm": 0.5356671214103699,
+ "learning_rate": 0.0001698344010887582,
+ "loss": 1.4032,
+ "step": 761
+ },
+ {
+ "epoch": 0.7622143727923478,
+ "grad_norm": 0.6881769299507141,
+ "learning_rate": 0.0001697593332545723,
+ "loss": 1.4885,
+ "step": 762
+ },
+ {
+ "epoch": 0.7632146541214717,
+ "grad_norm": 0.5370383262634277,
+ "learning_rate": 0.0001696841887674951,
+ "loss": 1.3271,
+ "step": 763
+ },
+ {
+ "epoch": 0.7642149354505955,
+ "grad_norm": 0.4792316257953644,
+ "learning_rate": 0.00016960896771009684,
+ "loss": 1.2274,
+ "step": 764
+ },
+ {
+ "epoch": 0.7652152167797193,
+ "grad_norm": 0.5276592373847961,
+ "learning_rate": 0.00016953367016503182,
+ "loss": 1.2399,
+ "step": 765
+ },
+ {
+ "epoch": 0.7662154981088432,
+ "grad_norm": 0.4789050221443176,
+ "learning_rate": 0.00016945829621503838,
+ "loss": 1.4002,
+ "step": 766
+ },
+ {
+ "epoch": 0.7672157794379669,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 0.00016938284594293897,
+ "loss": 1.3897,
+ "step": 767
+ },
+ {
+ "epoch": 0.7682160607670907,
+ "grad_norm": 0.5009675621986389,
+ "learning_rate": 0.00016930731943163972,
+ "loss": 1.3797,
+ "step": 768
+ },
+ {
+ "epoch": 0.7692163420962146,
+ "grad_norm": 0.4863432049751282,
+ "learning_rate": 0.00016923171676413063,
+ "loss": 1.4251,
+ "step": 769
+ },
+ {
+ "epoch": 0.7702166234253384,
+ "grad_norm": 0.5190616846084595,
+ "learning_rate": 0.00016915603802348535,
+ "loss": 1.4265,
+ "step": 770
+ },
+ {
+ "epoch": 0.7712169047544621,
+ "grad_norm": 0.5603469610214233,
+ "learning_rate": 0.00016908028329286112,
+ "loss": 1.2852,
+ "step": 771
+ },
+ {
+ "epoch": 0.772217186083586,
+ "grad_norm": 0.5128753185272217,
+ "learning_rate": 0.0001690044526554987,
+ "loss": 1.3324,
+ "step": 772
+ },
+ {
+ "epoch": 0.7732174674127098,
+ "grad_norm": 0.4992072284221649,
+ "learning_rate": 0.00016892854619472223,
+ "loss": 1.2498,
+ "step": 773
+ },
+ {
+ "epoch": 0.7742177487418337,
+ "grad_norm": 0.6128407716751099,
+ "learning_rate": 0.00016885256399393924,
+ "loss": 1.2967,
+ "step": 774
+ },
+ {
+ "epoch": 0.7752180300709575,
+ "grad_norm": 0.5186858177185059,
+ "learning_rate": 0.00016877650613664034,
+ "loss": 1.2654,
+ "step": 775
+ },
+ {
+ "epoch": 0.7762183114000812,
+ "grad_norm": 0.5207421183586121,
+ "learning_rate": 0.00016870037270639942,
+ "loss": 1.2994,
+ "step": 776
+ },
+ {
+ "epoch": 0.7772185927292051,
+ "grad_norm": 0.509912371635437,
+ "learning_rate": 0.0001686241637868734,
+ "loss": 1.3971,
+ "step": 777
+ },
+ {
+ "epoch": 0.7782188740583289,
+ "grad_norm": 0.47703370451927185,
+ "learning_rate": 0.00016854787946180198,
+ "loss": 1.282,
+ "step": 778
+ },
+ {
+ "epoch": 0.7792191553874527,
+ "grad_norm": 0.5404442548751831,
+ "learning_rate": 0.00016847151981500789,
+ "loss": 1.1986,
+ "step": 779
+ },
+ {
+ "epoch": 0.7802194367165766,
+ "grad_norm": 0.541050136089325,
+ "learning_rate": 0.00016839508493039657,
+ "loss": 1.4478,
+ "step": 780
+ },
+ {
+ "epoch": 0.7812197180457003,
+ "grad_norm": 0.46520569920539856,
+ "learning_rate": 0.00016831857489195618,
+ "loss": 1.2385,
+ "step": 781
+ },
+ {
+ "epoch": 0.7822199993748241,
+ "grad_norm": 0.5150445699691772,
+ "learning_rate": 0.00016824198978375736,
+ "loss": 1.3695,
+ "step": 782
+ },
+ {
+ "epoch": 0.783220280703948,
+ "grad_norm": 0.5754334926605225,
+ "learning_rate": 0.00016816532968995328,
+ "loss": 1.3026,
+ "step": 783
+ },
+ {
+ "epoch": 0.7842205620330718,
+ "grad_norm": 0.5335776209831238,
+ "learning_rate": 0.0001680885946947796,
+ "loss": 1.3391,
+ "step": 784
+ },
+ {
+ "epoch": 0.7852208433621957,
+ "grad_norm": 0.6596659421920776,
+ "learning_rate": 0.00016801178488255413,
+ "loss": 1.3224,
+ "step": 785
+ },
+ {
+ "epoch": 0.7862211246913194,
+ "grad_norm": 0.5251991748809814,
+ "learning_rate": 0.00016793490033767698,
+ "loss": 1.1744,
+ "step": 786
+ },
+ {
+ "epoch": 0.7872214060204432,
+ "grad_norm": 0.5112204551696777,
+ "learning_rate": 0.00016785794114463037,
+ "loss": 1.2455,
+ "step": 787
+ },
+ {
+ "epoch": 0.7882216873495671,
+ "grad_norm": 0.532893717288971,
+ "learning_rate": 0.00016778090738797853,
+ "loss": 1.2437,
+ "step": 788
+ },
+ {
+ "epoch": 0.7892219686786909,
+ "grad_norm": 0.5534240007400513,
+ "learning_rate": 0.00016770379915236766,
+ "loss": 1.396,
+ "step": 789
+ },
+ {
+ "epoch": 0.7902222500078147,
+ "grad_norm": 0.5164292454719543,
+ "learning_rate": 0.00016762661652252567,
+ "loss": 1.3138,
+ "step": 790
+ },
+ {
+ "epoch": 0.7912225313369385,
+ "grad_norm": 0.5660764575004578,
+ "learning_rate": 0.00016754935958326244,
+ "loss": 1.3014,
+ "step": 791
+ },
+ {
+ "epoch": 0.7922228126660623,
+ "grad_norm": 0.5137651562690735,
+ "learning_rate": 0.00016747202841946928,
+ "loss": 1.2834,
+ "step": 792
+ },
+ {
+ "epoch": 0.7932230939951862,
+ "grad_norm": 0.5546874403953552,
+ "learning_rate": 0.00016739462311611919,
+ "loss": 1.2841,
+ "step": 793
+ },
+ {
+ "epoch": 0.79422337532431,
+ "grad_norm": 0.5112007260322571,
+ "learning_rate": 0.00016731714375826657,
+ "loss": 1.1873,
+ "step": 794
+ },
+ {
+ "epoch": 0.7952236566534338,
+ "grad_norm": 0.5462679862976074,
+ "learning_rate": 0.00016723959043104728,
+ "loss": 1.2602,
+ "step": 795
+ },
+ {
+ "epoch": 0.7962239379825576,
+ "grad_norm": 0.5083702802658081,
+ "learning_rate": 0.00016716196321967832,
+ "loss": 1.334,
+ "step": 796
+ },
+ {
+ "epoch": 0.7972242193116814,
+ "grad_norm": 0.5491913557052612,
+ "learning_rate": 0.00016708426220945802,
+ "loss": 1.335,
+ "step": 797
+ },
+ {
+ "epoch": 0.7982245006408052,
+ "grad_norm": 0.5257419943809509,
+ "learning_rate": 0.00016700648748576574,
+ "loss": 1.374,
+ "step": 798
+ },
+ {
+ "epoch": 0.7992247819699291,
+ "grad_norm": 0.5252013206481934,
+ "learning_rate": 0.0001669286391340618,
+ "loss": 1.281,
+ "step": 799
+ },
+ {
+ "epoch": 0.8002250632990529,
+ "grad_norm": 0.5784058570861816,
+ "learning_rate": 0.00016685071723988748,
+ "loss": 1.385,
+ "step": 800
+ },
+ {
+ "epoch": 0.8012253446281766,
+ "grad_norm": 0.5508819818496704,
+ "learning_rate": 0.00016677272188886483,
+ "loss": 1.5138,
+ "step": 801
+ },
+ {
+ "epoch": 0.8022256259573005,
+ "grad_norm": 0.5943104028701782,
+ "learning_rate": 0.00016669465316669667,
+ "loss": 1.2341,
+ "step": 802
+ },
+ {
+ "epoch": 0.8032259072864243,
+ "grad_norm": 0.5109750032424927,
+ "learning_rate": 0.00016661651115916642,
+ "loss": 1.361,
+ "step": 803
+ },
+ {
+ "epoch": 0.8042261886155482,
+ "grad_norm": 0.5322972536087036,
+ "learning_rate": 0.00016653829595213794,
+ "loss": 1.3383,
+ "step": 804
+ },
+ {
+ "epoch": 0.805226469944672,
+ "grad_norm": 0.4870489537715912,
+ "learning_rate": 0.00016646000763155568,
+ "loss": 1.2932,
+ "step": 805
+ },
+ {
+ "epoch": 0.8062267512737957,
+ "grad_norm": 0.6070749163627625,
+ "learning_rate": 0.00016638164628344425,
+ "loss": 1.3517,
+ "step": 806
+ },
+ {
+ "epoch": 0.8072270326029196,
+ "grad_norm": 0.5695485472679138,
+ "learning_rate": 0.00016630321199390867,
+ "loss": 1.295,
+ "step": 807
+ },
+ {
+ "epoch": 0.8082273139320434,
+ "grad_norm": 0.49092933535575867,
+ "learning_rate": 0.00016622470484913406,
+ "loss": 1.1708,
+ "step": 808
+ },
+ {
+ "epoch": 0.8092275952611672,
+ "grad_norm": 0.5488709807395935,
+ "learning_rate": 0.00016614612493538551,
+ "loss": 1.3101,
+ "step": 809
+ },
+ {
+ "epoch": 0.810227876590291,
+ "grad_norm": 0.6875150799751282,
+ "learning_rate": 0.00016606747233900815,
+ "loss": 1.3,
+ "step": 810
+ },
+ {
+ "epoch": 0.8112281579194148,
+ "grad_norm": 0.5599775910377502,
+ "learning_rate": 0.00016598874714642697,
+ "loss": 1.5711,
+ "step": 811
+ },
+ {
+ "epoch": 0.8122284392485386,
+ "grad_norm": 0.7102994322776794,
+ "learning_rate": 0.00016590994944414678,
+ "loss": 1.4553,
+ "step": 812
+ },
+ {
+ "epoch": 0.8132287205776625,
+ "grad_norm": 0.5191233158111572,
+ "learning_rate": 0.00016583107931875192,
+ "loss": 1.4292,
+ "step": 813
+ },
+ {
+ "epoch": 0.8142290019067863,
+ "grad_norm": 0.4739600718021393,
+ "learning_rate": 0.0001657521368569064,
+ "loss": 1.3776,
+ "step": 814
+ },
+ {
+ "epoch": 0.8152292832359102,
+ "grad_norm": 0.5282078981399536,
+ "learning_rate": 0.0001656731221453537,
+ "loss": 1.4359,
+ "step": 815
+ },
+ {
+ "epoch": 0.8162295645650339,
+ "grad_norm": 0.690367579460144,
+ "learning_rate": 0.00016559403527091675,
+ "loss": 1.1747,
+ "step": 816
+ },
+ {
+ "epoch": 0.8172298458941577,
+ "grad_norm": 0.5715120434761047,
+ "learning_rate": 0.0001655148763204977,
+ "loss": 1.3289,
+ "step": 817
+ },
+ {
+ "epoch": 0.8182301272232816,
+ "grad_norm": 0.7024423480033875,
+ "learning_rate": 0.00016543564538107797,
+ "loss": 1.4758,
+ "step": 818
+ },
+ {
+ "epoch": 0.8192304085524054,
+ "grad_norm": 0.5568886399269104,
+ "learning_rate": 0.00016535634253971794,
+ "loss": 1.5172,
+ "step": 819
+ },
+ {
+ "epoch": 0.8202306898815291,
+ "grad_norm": 0.5847441554069519,
+ "learning_rate": 0.00016527696788355714,
+ "loss": 1.1993,
+ "step": 820
+ },
+ {
+ "epoch": 0.821230971210653,
+ "grad_norm": 0.5402149558067322,
+ "learning_rate": 0.00016519752149981397,
+ "loss": 1.2921,
+ "step": 821
+ },
+ {
+ "epoch": 0.8222312525397768,
+ "grad_norm": 0.6050311326980591,
+ "learning_rate": 0.0001651180034757856,
+ "loss": 1.59,
+ "step": 822
+ },
+ {
+ "epoch": 0.8232315338689006,
+ "grad_norm": 0.6215486526489258,
+ "learning_rate": 0.00016503841389884798,
+ "loss": 1.4562,
+ "step": 823
+ },
+ {
+ "epoch": 0.8242318151980245,
+ "grad_norm": 0.6507789492607117,
+ "learning_rate": 0.00016495875285645566,
+ "loss": 1.349,
+ "step": 824
+ },
+ {
+ "epoch": 0.8252320965271482,
+ "grad_norm": 0.5273147225379944,
+ "learning_rate": 0.00016487902043614173,
+ "loss": 1.4016,
+ "step": 825
+ },
+ {
+ "epoch": 0.8262323778562721,
+ "grad_norm": 0.579987645149231,
+ "learning_rate": 0.0001647992167255177,
+ "loss": 1.4077,
+ "step": 826
+ },
+ {
+ "epoch": 0.8272326591853959,
+ "grad_norm": 0.5068405270576477,
+ "learning_rate": 0.0001647193418122734,
+ "loss": 1.5075,
+ "step": 827
+ },
+ {
+ "epoch": 0.8282329405145197,
+ "grad_norm": 0.519982099533081,
+ "learning_rate": 0.00016463939578417692,
+ "loss": 1.2721,
+ "step": 828
+ },
+ {
+ "epoch": 0.8292332218436436,
+ "grad_norm": 0.5181561708450317,
+ "learning_rate": 0.0001645593787290745,
+ "loss": 1.2299,
+ "step": 829
+ },
+ {
+ "epoch": 0.8302335031727673,
+ "grad_norm": 0.47413337230682373,
+ "learning_rate": 0.0001644792907348904,
+ "loss": 1.2462,
+ "step": 830
+ },
+ {
+ "epoch": 0.8312337845018911,
+ "grad_norm": 0.5426570773124695,
+ "learning_rate": 0.00016439913188962685,
+ "loss": 1.4496,
+ "step": 831
+ },
+ {
+ "epoch": 0.832234065831015,
+ "grad_norm": 0.5744379758834839,
+ "learning_rate": 0.0001643189022813639,
+ "loss": 1.3284,
+ "step": 832
+ },
+ {
+ "epoch": 0.8332343471601388,
+ "grad_norm": 0.49693235754966736,
+ "learning_rate": 0.0001642386019982594,
+ "loss": 1.4082,
+ "step": 833
+ },
+ {
+ "epoch": 0.8342346284892626,
+ "grad_norm": 0.5346773862838745,
+ "learning_rate": 0.00016415823112854883,
+ "loss": 1.4238,
+ "step": 834
+ },
+ {
+ "epoch": 0.8352349098183864,
+ "grad_norm": 0.6201802492141724,
+ "learning_rate": 0.00016407778976054526,
+ "loss": 1.3288,
+ "step": 835
+ },
+ {
+ "epoch": 0.8362351911475102,
+ "grad_norm": 0.5161807537078857,
+ "learning_rate": 0.0001639972779826392,
+ "loss": 1.3798,
+ "step": 836
+ },
+ {
+ "epoch": 0.8372354724766341,
+ "grad_norm": 0.4670160412788391,
+ "learning_rate": 0.0001639166958832985,
+ "loss": 1.3765,
+ "step": 837
+ },
+ {
+ "epoch": 0.8382357538057579,
+ "grad_norm": 0.6492543816566467,
+ "learning_rate": 0.00016383604355106837,
+ "loss": 1.5277,
+ "step": 838
+ },
+ {
+ "epoch": 0.8392360351348817,
+ "grad_norm": 0.5766328573226929,
+ "learning_rate": 0.00016375532107457108,
+ "loss": 1.2481,
+ "step": 839
+ },
+ {
+ "epoch": 0.8402363164640055,
+ "grad_norm": 0.6431072950363159,
+ "learning_rate": 0.00016367452854250603,
+ "loss": 1.2755,
+ "step": 840
+ },
+ {
+ "epoch": 0.8412365977931293,
+ "grad_norm": 0.5121828317642212,
+ "learning_rate": 0.00016359366604364972,
+ "loss": 1.2927,
+ "step": 841
+ },
+ {
+ "epoch": 0.8422368791222531,
+ "grad_norm": 0.5222392678260803,
+ "learning_rate": 0.00016351273366685526,
+ "loss": 1.2626,
+ "step": 842
+ },
+ {
+ "epoch": 0.843237160451377,
+ "grad_norm": 0.5536903142929077,
+ "learning_rate": 0.00016343173150105278,
+ "loss": 1.1892,
+ "step": 843
+ },
+ {
+ "epoch": 0.8442374417805008,
+ "grad_norm": 0.5569381713867188,
+ "learning_rate": 0.00016335065963524897,
+ "loss": 1.4263,
+ "step": 844
+ },
+ {
+ "epoch": 0.8452377231096245,
+ "grad_norm": 0.6490715742111206,
+ "learning_rate": 0.0001632695181585272,
+ "loss": 1.452,
+ "step": 845
+ },
+ {
+ "epoch": 0.8462380044387484,
+ "grad_norm": 0.5965350270271301,
+ "learning_rate": 0.00016318830716004722,
+ "loss": 1.4189,
+ "step": 846
+ },
+ {
+ "epoch": 0.8472382857678722,
+ "grad_norm": 0.45904603600502014,
+ "learning_rate": 0.00016310702672904528,
+ "loss": 1.4024,
+ "step": 847
+ },
+ {
+ "epoch": 0.8482385670969961,
+ "grad_norm": 0.4320334494113922,
+ "learning_rate": 0.00016302567695483382,
+ "loss": 1.2105,
+ "step": 848
+ },
+ {
+ "epoch": 0.8492388484261199,
+ "grad_norm": 0.527032196521759,
+ "learning_rate": 0.0001629442579268016,
+ "loss": 1.1996,
+ "step": 849
+ },
+ {
+ "epoch": 0.8502391297552436,
+ "grad_norm": 0.6317036747932434,
+ "learning_rate": 0.00016286276973441333,
+ "loss": 1.4811,
+ "step": 850
+ },
+ {
+ "epoch": 0.8512394110843675,
+ "grad_norm": 0.5726277828216553,
+ "learning_rate": 0.00016278121246720987,
+ "loss": 1.3249,
+ "step": 851
+ },
+ {
+ "epoch": 0.8522396924134913,
+ "grad_norm": 0.4624577462673187,
+ "learning_rate": 0.00016269958621480788,
+ "loss": 1.3291,
+ "step": 852
+ },
+ {
+ "epoch": 0.8532399737426151,
+ "grad_norm": 0.5774461627006531,
+ "learning_rate": 0.0001626178910668998,
+ "loss": 1.2891,
+ "step": 853
+ },
+ {
+ "epoch": 0.854240255071739,
+ "grad_norm": 0.503584086894989,
+ "learning_rate": 0.00016253612711325386,
+ "loss": 1.3048,
+ "step": 854
+ },
+ {
+ "epoch": 0.8552405364008627,
+ "grad_norm": 0.4560583233833313,
+ "learning_rate": 0.0001624542944437139,
+ "loss": 1.2658,
+ "step": 855
+ },
+ {
+ "epoch": 0.8562408177299866,
+ "grad_norm": 0.49611610174179077,
+ "learning_rate": 0.00016237239314819917,
+ "loss": 1.1017,
+ "step": 856
+ },
+ {
+ "epoch": 0.8572410990591104,
+ "grad_norm": 0.5600405931472778,
+ "learning_rate": 0.0001622904233167044,
+ "loss": 1.3274,
+ "step": 857
+ },
+ {
+ "epoch": 0.8582413803882342,
+ "grad_norm": 0.5849353075027466,
+ "learning_rate": 0.0001622083850392996,
+ "loss": 1.274,
+ "step": 858
+ },
+ {
+ "epoch": 0.859241661717358,
+ "grad_norm": 0.5781377553939819,
+ "learning_rate": 0.00016212627840613003,
+ "loss": 1.4157,
+ "step": 859
+ },
+ {
+ "epoch": 0.8602419430464818,
+ "grad_norm": 0.4908173680305481,
+ "learning_rate": 0.000162044103507416,
+ "loss": 1.3,
+ "step": 860
+ },
+ {
+ "epoch": 0.8612422243756056,
+ "grad_norm": 0.5844553112983704,
+ "learning_rate": 0.00016196186043345288,
+ "loss": 1.2325,
+ "step": 861
+ },
+ {
+ "epoch": 0.8622425057047295,
+ "grad_norm": 0.5381117463111877,
+ "learning_rate": 0.00016187954927461093,
+ "loss": 1.41,
+ "step": 862
+ },
+ {
+ "epoch": 0.8632427870338533,
+ "grad_norm": 0.5468165278434753,
+ "learning_rate": 0.00016179717012133521,
+ "loss": 1.4272,
+ "step": 863
+ },
+ {
+ "epoch": 0.864243068362977,
+ "grad_norm": 0.5702970027923584,
+ "learning_rate": 0.00016171472306414554,
+ "loss": 1.3624,
+ "step": 864
+ },
+ {
+ "epoch": 0.8652433496921009,
+ "grad_norm": 0.5430637001991272,
+ "learning_rate": 0.00016163220819363628,
+ "loss": 1.2555,
+ "step": 865
+ },
+ {
+ "epoch": 0.8662436310212247,
+ "grad_norm": 0.5266844034194946,
+ "learning_rate": 0.00016154962560047643,
+ "loss": 1.3743,
+ "step": 866
+ },
+ {
+ "epoch": 0.8672439123503486,
+ "grad_norm": 0.5201333165168762,
+ "learning_rate": 0.00016146697537540924,
+ "loss": 1.3959,
+ "step": 867
+ },
+ {
+ "epoch": 0.8682441936794724,
+ "grad_norm": 0.44362199306488037,
+ "learning_rate": 0.0001613842576092524,
+ "loss": 1.2661,
+ "step": 868
+ },
+ {
+ "epoch": 0.8692444750085961,
+ "grad_norm": 0.5465226769447327,
+ "learning_rate": 0.00016130147239289778,
+ "loss": 1.3688,
+ "step": 869
+ },
+ {
+ "epoch": 0.87024475633772,
+ "grad_norm": 0.5353460907936096,
+ "learning_rate": 0.00016121861981731135,
+ "loss": 1.2327,
+ "step": 870
+ },
+ {
+ "epoch": 0.8712450376668438,
+ "grad_norm": 0.5463739633560181,
+ "learning_rate": 0.00016113569997353312,
+ "loss": 1.2994,
+ "step": 871
+ },
+ {
+ "epoch": 0.8722453189959676,
+ "grad_norm": 0.5219647288322449,
+ "learning_rate": 0.000161052712952677,
+ "loss": 1.3916,
+ "step": 872
+ },
+ {
+ "epoch": 0.8732456003250915,
+ "grad_norm": 0.4675636887550354,
+ "learning_rate": 0.0001609696588459307,
+ "loss": 1.2786,
+ "step": 873
+ },
+ {
+ "epoch": 0.8742458816542152,
+ "grad_norm": 0.48863986134529114,
+ "learning_rate": 0.00016088653774455568,
+ "loss": 1.1762,
+ "step": 874
+ },
+ {
+ "epoch": 0.875246162983339,
+ "grad_norm": 0.48759785294532776,
+ "learning_rate": 0.00016080334973988695,
+ "loss": 1.2107,
+ "step": 875
+ },
+ {
+ "epoch": 0.8762464443124629,
+ "grad_norm": 0.7353807687759399,
+ "learning_rate": 0.00016072009492333318,
+ "loss": 1.4855,
+ "step": 876
+ },
+ {
+ "epoch": 0.8772467256415867,
+ "grad_norm": 0.4878953993320465,
+ "learning_rate": 0.0001606367733863763,
+ "loss": 1.2343,
+ "step": 877
+ },
+ {
+ "epoch": 0.8782470069707106,
+ "grad_norm": 0.4764840304851532,
+ "learning_rate": 0.00016055338522057158,
+ "loss": 1.3159,
+ "step": 878
+ },
+ {
+ "epoch": 0.8792472882998343,
+ "grad_norm": 0.5289160013198853,
+ "learning_rate": 0.00016046993051754756,
+ "loss": 1.3298,
+ "step": 879
+ },
+ {
+ "epoch": 0.8802475696289581,
+ "grad_norm": 0.5421459078788757,
+ "learning_rate": 0.00016038640936900586,
+ "loss": 1.4081,
+ "step": 880
+ },
+ {
+ "epoch": 0.881247850958082,
+ "grad_norm": 0.5096681118011475,
+ "learning_rate": 0.00016030282186672116,
+ "loss": 1.2406,
+ "step": 881
+ },
+ {
+ "epoch": 0.8822481322872058,
+ "grad_norm": 0.5783627033233643,
+ "learning_rate": 0.00016021916810254097,
+ "loss": 1.3505,
+ "step": 882
+ },
+ {
+ "epoch": 0.8832484136163296,
+ "grad_norm": 0.5718142986297607,
+ "learning_rate": 0.00016013544816838565,
+ "loss": 1.4106,
+ "step": 883
+ },
+ {
+ "epoch": 0.8842486949454534,
+ "grad_norm": 0.551607072353363,
+ "learning_rate": 0.00016005166215624827,
+ "loss": 1.3474,
+ "step": 884
+ },
+ {
+ "epoch": 0.8852489762745772,
+ "grad_norm": 0.5464247465133667,
+ "learning_rate": 0.0001599678101581945,
+ "loss": 1.4443,
+ "step": 885
+ },
+ {
+ "epoch": 0.886249257603701,
+ "grad_norm": 0.5075456500053406,
+ "learning_rate": 0.00015988389226636253,
+ "loss": 1.4919,
+ "step": 886
+ },
+ {
+ "epoch": 0.8872495389328249,
+ "grad_norm": 0.48557186126708984,
+ "learning_rate": 0.00015979990857296295,
+ "loss": 1.4225,
+ "step": 887
+ },
+ {
+ "epoch": 0.8882498202619487,
+ "grad_norm": 0.5385611653327942,
+ "learning_rate": 0.00015971585917027862,
+ "loss": 1.2937,
+ "step": 888
+ },
+ {
+ "epoch": 0.8892501015910725,
+ "grad_norm": 0.6477749943733215,
+ "learning_rate": 0.00015963174415066468,
+ "loss": 1.5628,
+ "step": 889
+ },
+ {
+ "epoch": 0.8902503829201963,
+ "grad_norm": 0.6205973029136658,
+ "learning_rate": 0.0001595475636065483,
+ "loss": 1.4902,
+ "step": 890
+ },
+ {
+ "epoch": 0.8912506642493201,
+ "grad_norm": 0.45717301964759827,
+ "learning_rate": 0.00015946331763042867,
+ "loss": 1.1998,
+ "step": 891
+ },
+ {
+ "epoch": 0.892250945578444,
+ "grad_norm": 0.5279855132102966,
+ "learning_rate": 0.00015937900631487686,
+ "loss": 1.0668,
+ "step": 892
+ },
+ {
+ "epoch": 0.8932512269075678,
+ "grad_norm": 0.5207269787788391,
+ "learning_rate": 0.00015929462975253585,
+ "loss": 1.2774,
+ "step": 893
+ },
+ {
+ "epoch": 0.8942515082366915,
+ "grad_norm": 0.5200834274291992,
+ "learning_rate": 0.00015921018803612014,
+ "loss": 1.4316,
+ "step": 894
+ },
+ {
+ "epoch": 0.8952517895658154,
+ "grad_norm": 0.48317649960517883,
+ "learning_rate": 0.0001591256812584159,
+ "loss": 1.4101,
+ "step": 895
+ },
+ {
+ "epoch": 0.8962520708949392,
+ "grad_norm": 0.475483775138855,
+ "learning_rate": 0.00015904110951228082,
+ "loss": 1.2011,
+ "step": 896
+ },
+ {
+ "epoch": 0.897252352224063,
+ "grad_norm": 0.6542660593986511,
+ "learning_rate": 0.00015895647289064396,
+ "loss": 1.56,
+ "step": 897
+ },
+ {
+ "epoch": 0.8982526335531869,
+ "grad_norm": 0.5154829621315002,
+ "learning_rate": 0.00015887177148650564,
+ "loss": 1.3748,
+ "step": 898
+ },
+ {
+ "epoch": 0.8992529148823106,
+ "grad_norm": 0.5744799375534058,
+ "learning_rate": 0.0001587870053929374,
+ "loss": 1.4072,
+ "step": 899
+ },
+ {
+ "epoch": 0.9002531962114345,
+ "grad_norm": 0.4835909307003021,
+ "learning_rate": 0.00015870217470308188,
+ "loss": 1.3037,
+ "step": 900
+ },
+ {
+ "epoch": 0.9012534775405583,
+ "grad_norm": 0.5292366743087769,
+ "learning_rate": 0.0001586172795101526,
+ "loss": 1.2395,
+ "step": 901
+ },
+ {
+ "epoch": 0.9022537588696821,
+ "grad_norm": 0.5905430912971497,
+ "learning_rate": 0.00015853231990743406,
+ "loss": 1.29,
+ "step": 902
+ },
+ {
+ "epoch": 0.903254040198806,
+ "grad_norm": 0.4918007254600525,
+ "learning_rate": 0.0001584472959882815,
+ "loss": 1.2593,
+ "step": 903
+ },
+ {
+ "epoch": 0.9042543215279297,
+ "grad_norm": 0.4735652208328247,
+ "learning_rate": 0.00015836220784612085,
+ "loss": 1.1669,
+ "step": 904
+ },
+ {
+ "epoch": 0.9052546028570535,
+ "grad_norm": 0.6272550821304321,
+ "learning_rate": 0.00015827705557444852,
+ "loss": 1.3692,
+ "step": 905
+ },
+ {
+ "epoch": 0.9062548841861774,
+ "grad_norm": 0.5333564877510071,
+ "learning_rate": 0.00015819183926683153,
+ "loss": 1.3672,
+ "step": 906
+ },
+ {
+ "epoch": 0.9072551655153012,
+ "grad_norm": 0.44029948115348816,
+ "learning_rate": 0.00015810655901690715,
+ "loss": 1.2124,
+ "step": 907
+ },
+ {
+ "epoch": 0.9082554468444249,
+ "grad_norm": 0.5636379718780518,
+ "learning_rate": 0.00015802121491838297,
+ "loss": 1.3507,
+ "step": 908
+ },
+ {
+ "epoch": 0.9092557281735488,
+ "grad_norm": 0.4394778907299042,
+ "learning_rate": 0.0001579358070650367,
+ "loss": 1.3159,
+ "step": 909
+ },
+ {
+ "epoch": 0.9102560095026726,
+ "grad_norm": 0.5382723212242126,
+ "learning_rate": 0.00015785033555071616,
+ "loss": 1.3733,
+ "step": 910
+ },
+ {
+ "epoch": 0.9112562908317965,
+ "grad_norm": 0.5251659750938416,
+ "learning_rate": 0.00015776480046933905,
+ "loss": 1.2253,
+ "step": 911
+ },
+ {
+ "epoch": 0.9122565721609203,
+ "grad_norm": 0.4791383743286133,
+ "learning_rate": 0.000157679201914893,
+ "loss": 1.2341,
+ "step": 912
+ },
+ {
+ "epoch": 0.913256853490044,
+ "grad_norm": 0.5058613419532776,
+ "learning_rate": 0.00015759353998143528,
+ "loss": 1.2717,
+ "step": 913
+ },
+ {
+ "epoch": 0.9142571348191679,
+ "grad_norm": 0.46837320923805237,
+ "learning_rate": 0.00015750781476309288,
+ "loss": 1.2484,
+ "step": 914
+ },
+ {
+ "epoch": 0.9152574161482917,
+ "grad_norm": 0.524444580078125,
+ "learning_rate": 0.00015742202635406235,
+ "loss": 1.5512,
+ "step": 915
+ },
+ {
+ "epoch": 0.9162576974774155,
+ "grad_norm": 0.6169744729995728,
+ "learning_rate": 0.00015733617484860963,
+ "loss": 1.271,
+ "step": 916
+ },
+ {
+ "epoch": 0.9172579788065394,
+ "grad_norm": 0.48883670568466187,
+ "learning_rate": 0.00015725026034106996,
+ "loss": 1.4779,
+ "step": 917
+ },
+ {
+ "epoch": 0.9182582601356631,
+ "grad_norm": 0.5408645272254944,
+ "learning_rate": 0.00015716428292584787,
+ "loss": 1.3574,
+ "step": 918
+ },
+ {
+ "epoch": 0.919258541464787,
+ "grad_norm": 0.5622221231460571,
+ "learning_rate": 0.00015707824269741702,
+ "loss": 1.2146,
+ "step": 919
+ },
+ {
+ "epoch": 0.9202588227939108,
+ "grad_norm": 0.477328896522522,
+ "learning_rate": 0.00015699213975031996,
+ "loss": 1.162,
+ "step": 920
+ },
+ {
+ "epoch": 0.9212591041230346,
+ "grad_norm": 0.503027081489563,
+ "learning_rate": 0.0001569059741791684,
+ "loss": 1.1674,
+ "step": 921
+ },
+ {
+ "epoch": 0.9222593854521585,
+ "grad_norm": 0.5951637625694275,
+ "learning_rate": 0.0001568197460786426,
+ "loss": 1.3737,
+ "step": 922
+ },
+ {
+ "epoch": 0.9232596667812822,
+ "grad_norm": 0.5276626348495483,
+ "learning_rate": 0.0001567334555434917,
+ "loss": 1.2494,
+ "step": 923
+ },
+ {
+ "epoch": 0.924259948110406,
+ "grad_norm": 0.6354761123657227,
+ "learning_rate": 0.0001566471026685334,
+ "loss": 1.2052,
+ "step": 924
+ },
+ {
+ "epoch": 0.9252602294395299,
+ "grad_norm": 0.4227287471294403,
+ "learning_rate": 0.00015656068754865387,
+ "loss": 1.1446,
+ "step": 925
+ },
+ {
+ "epoch": 0.9262605107686537,
+ "grad_norm": 0.5290839076042175,
+ "learning_rate": 0.00015647421027880772,
+ "loss": 1.2057,
+ "step": 926
+ },
+ {
+ "epoch": 0.9272607920977775,
+ "grad_norm": 0.4961225986480713,
+ "learning_rate": 0.0001563876709540178,
+ "loss": 1.2788,
+ "step": 927
+ },
+ {
+ "epoch": 0.9282610734269013,
+ "grad_norm": 0.5095213651657104,
+ "learning_rate": 0.0001563010696693752,
+ "loss": 1.2751,
+ "step": 928
+ },
+ {
+ "epoch": 0.9292613547560251,
+ "grad_norm": 0.5027223825454712,
+ "learning_rate": 0.00015621440652003907,
+ "loss": 1.3653,
+ "step": 929
+ },
+ {
+ "epoch": 0.930261636085149,
+ "grad_norm": 0.49251896142959595,
+ "learning_rate": 0.00015612768160123652,
+ "loss": 1.1556,
+ "step": 930
+ },
+ {
+ "epoch": 0.9312619174142728,
+ "grad_norm": 0.5187139511108398,
+ "learning_rate": 0.00015604089500826257,
+ "loss": 1.3623,
+ "step": 931
+ },
+ {
+ "epoch": 0.9322621987433966,
+ "grad_norm": 0.5004428029060364,
+ "learning_rate": 0.00015595404683648,
+ "loss": 1.185,
+ "step": 932
+ },
+ {
+ "epoch": 0.9332624800725204,
+ "grad_norm": 0.5750531554222107,
+ "learning_rate": 0.00015586713718131922,
+ "loss": 1.2999,
+ "step": 933
+ },
+ {
+ "epoch": 0.9342627614016442,
+ "grad_norm": 0.482732355594635,
+ "learning_rate": 0.0001557801661382782,
+ "loss": 1.2635,
+ "step": 934
+ },
+ {
+ "epoch": 0.935263042730768,
+ "grad_norm": 0.47854143381118774,
+ "learning_rate": 0.00015569313380292248,
+ "loss": 1.2833,
+ "step": 935
+ },
+ {
+ "epoch": 0.9362633240598919,
+ "grad_norm": 0.49382665753364563,
+ "learning_rate": 0.00015560604027088477,
+ "loss": 1.2327,
+ "step": 936
+ },
+ {
+ "epoch": 0.9372636053890157,
+ "grad_norm": 0.5009885430335999,
+ "learning_rate": 0.00015551888563786515,
+ "loss": 1.2967,
+ "step": 937
+ },
+ {
+ "epoch": 0.9382638867181394,
+ "grad_norm": 0.5012707114219666,
+ "learning_rate": 0.00015543166999963076,
+ "loss": 1.3231,
+ "step": 938
+ },
+ {
+ "epoch": 0.9392641680472633,
+ "grad_norm": 0.6908506751060486,
+ "learning_rate": 0.0001553443934520159,
+ "loss": 1.4055,
+ "step": 939
+ },
+ {
+ "epoch": 0.9402644493763871,
+ "grad_norm": 0.7104817032814026,
+ "learning_rate": 0.00015525705609092157,
+ "loss": 1.3435,
+ "step": 940
+ },
+ {
+ "epoch": 0.941264730705511,
+ "grad_norm": 0.49263522028923035,
+ "learning_rate": 0.00015516965801231586,
+ "loss": 1.2259,
+ "step": 941
+ },
+ {
+ "epoch": 0.9422650120346348,
+ "grad_norm": 0.5337693691253662,
+ "learning_rate": 0.0001550821993122334,
+ "loss": 1.2863,
+ "step": 942
+ },
+ {
+ "epoch": 0.9432652933637585,
+ "grad_norm": 0.5506749153137207,
+ "learning_rate": 0.0001549946800867755,
+ "loss": 1.4061,
+ "step": 943
+ },
+ {
+ "epoch": 0.9442655746928824,
+ "grad_norm": 0.5121364593505859,
+ "learning_rate": 0.00015490710043210997,
+ "loss": 1.3567,
+ "step": 944
+ },
+ {
+ "epoch": 0.9452658560220062,
+ "grad_norm": 0.5326678156852722,
+ "learning_rate": 0.00015481946044447099,
+ "loss": 1.2719,
+ "step": 945
+ },
+ {
+ "epoch": 0.94626613735113,
+ "grad_norm": 0.6023722290992737,
+ "learning_rate": 0.00015473176022015906,
+ "loss": 1.1512,
+ "step": 946
+ },
+ {
+ "epoch": 0.9472664186802539,
+ "grad_norm": 0.4953387975692749,
+ "learning_rate": 0.0001546439998555409,
+ "loss": 1.556,
+ "step": 947
+ },
+ {
+ "epoch": 0.9482667000093776,
+ "grad_norm": 0.5187799334526062,
+ "learning_rate": 0.0001545561794470492,
+ "loss": 1.279,
+ "step": 948
+ },
+ {
+ "epoch": 0.9492669813385014,
+ "grad_norm": 0.5788894295692444,
+ "learning_rate": 0.00015446829909118275,
+ "loss": 1.3246,
+ "step": 949
+ },
+ {
+ "epoch": 0.9502672626676253,
+ "grad_norm": 0.5551681518554688,
+ "learning_rate": 0.00015438035888450623,
+ "loss": 1.2231,
+ "step": 950
+ },
+ {
+ "epoch": 0.9512675439967491,
+ "grad_norm": 0.4898390471935272,
+ "learning_rate": 0.00015429235892364994,
+ "loss": 1.2036,
+ "step": 951
+ },
+ {
+ "epoch": 0.952267825325873,
+ "grad_norm": 0.5427507162094116,
+ "learning_rate": 0.00015420429930530996,
+ "loss": 1.3614,
+ "step": 952
+ },
+ {
+ "epoch": 0.9532681066549967,
+ "grad_norm": 0.557054340839386,
+ "learning_rate": 0.00015411618012624786,
+ "loss": 1.4249,
+ "step": 953
+ },
+ {
+ "epoch": 0.9542683879841205,
+ "grad_norm": 0.5793543457984924,
+ "learning_rate": 0.00015402800148329071,
+ "loss": 1.4341,
+ "step": 954
+ },
+ {
+ "epoch": 0.9552686693132444,
+ "grad_norm": 0.5993456244468689,
+ "learning_rate": 0.00015393976347333088,
+ "loss": 1.0259,
+ "step": 955
+ },
+ {
+ "epoch": 0.9562689506423682,
+ "grad_norm": 0.554904580116272,
+ "learning_rate": 0.00015385146619332596,
+ "loss": 1.3558,
+ "step": 956
+ },
+ {
+ "epoch": 0.9572692319714919,
+ "grad_norm": 0.5488478541374207,
+ "learning_rate": 0.00015376310974029873,
+ "loss": 1.358,
+ "step": 957
+ },
+ {
+ "epoch": 0.9582695133006158,
+ "grad_norm": 0.5108879208564758,
+ "learning_rate": 0.00015367469421133695,
+ "loss": 1.3865,
+ "step": 958
+ },
+ {
+ "epoch": 0.9592697946297396,
+ "grad_norm": 0.4606814682483673,
+ "learning_rate": 0.00015358621970359325,
+ "loss": 1.2055,
+ "step": 959
+ },
+ {
+ "epoch": 0.9602700759588634,
+ "grad_norm": 0.4974004328250885,
+ "learning_rate": 0.00015349768631428519,
+ "loss": 1.2541,
+ "step": 960
+ },
+ {
+ "epoch": 0.9612703572879873,
+ "grad_norm": 0.5107241272926331,
+ "learning_rate": 0.00015340909414069488,
+ "loss": 1.1624,
+ "step": 961
+ },
+ {
+ "epoch": 0.962270638617111,
+ "grad_norm": 0.5587212443351746,
+ "learning_rate": 0.00015332044328016914,
+ "loss": 1.349,
+ "step": 962
+ },
+ {
+ "epoch": 0.9632709199462349,
+ "grad_norm": 0.5209497809410095,
+ "learning_rate": 0.0001532317338301192,
+ "loss": 1.3695,
+ "step": 963
+ },
+ {
+ "epoch": 0.9642712012753587,
+ "grad_norm": 0.4985620677471161,
+ "learning_rate": 0.00015314296588802076,
+ "loss": 1.4597,
+ "step": 964
+ },
+ {
+ "epoch": 0.9652714826044825,
+ "grad_norm": 0.5065100789070129,
+ "learning_rate": 0.00015305413955141365,
+ "loss": 1.4225,
+ "step": 965
+ },
+ {
+ "epoch": 0.9662717639336064,
+ "grad_norm": 0.5079792737960815,
+ "learning_rate": 0.00015296525491790205,
+ "loss": 1.057,
+ "step": 966
+ },
+ {
+ "epoch": 0.9672720452627301,
+ "grad_norm": 0.4673600196838379,
+ "learning_rate": 0.00015287631208515406,
+ "loss": 1.2531,
+ "step": 967
+ },
+ {
+ "epoch": 0.9682723265918539,
+ "grad_norm": 0.5309945344924927,
+ "learning_rate": 0.00015278731115090171,
+ "loss": 1.374,
+ "step": 968
+ },
+ {
+ "epoch": 0.9692726079209778,
+ "grad_norm": 0.4792092442512512,
+ "learning_rate": 0.00015269825221294098,
+ "loss": 1.3018,
+ "step": 969
+ },
+ {
+ "epoch": 0.9702728892501016,
+ "grad_norm": 0.5222868323326111,
+ "learning_rate": 0.00015260913536913154,
+ "loss": 1.4063,
+ "step": 970
+ },
+ {
+ "epoch": 0.9712731705792254,
+ "grad_norm": 0.5373417139053345,
+ "learning_rate": 0.00015251996071739664,
+ "loss": 1.2183,
+ "step": 971
+ },
+ {
+ "epoch": 0.9722734519083492,
+ "grad_norm": 0.5624721050262451,
+ "learning_rate": 0.00015243072835572318,
+ "loss": 1.2696,
+ "step": 972
+ },
+ {
+ "epoch": 0.973273733237473,
+ "grad_norm": 0.46938082575798035,
+ "learning_rate": 0.0001523414383821613,
+ "loss": 1.3544,
+ "step": 973
+ },
+ {
+ "epoch": 0.9742740145665969,
+ "grad_norm": 0.45348694920539856,
+ "learning_rate": 0.00015225209089482462,
+ "loss": 1.2078,
+ "step": 974
+ },
+ {
+ "epoch": 0.9752742958957207,
+ "grad_norm": 0.48000606894493103,
+ "learning_rate": 0.0001521626859918898,
+ "loss": 1.1914,
+ "step": 975
+ },
+ {
+ "epoch": 0.9762745772248445,
+ "grad_norm": 0.5106796622276306,
+ "learning_rate": 0.00015207322377159668,
+ "loss": 1.3249,
+ "step": 976
+ },
+ {
+ "epoch": 0.9772748585539683,
+ "grad_norm": 0.49865373969078064,
+ "learning_rate": 0.00015198370433224805,
+ "loss": 1.2876,
+ "step": 977
+ },
+ {
+ "epoch": 0.9782751398830921,
+ "grad_norm": 0.5271755456924438,
+ "learning_rate": 0.00015189412777220958,
+ "loss": 1.3049,
+ "step": 978
+ },
+ {
+ "epoch": 0.9792754212122159,
+ "grad_norm": 0.49824708700180054,
+ "learning_rate": 0.00015180449418990976,
+ "loss": 1.1614,
+ "step": 979
+ },
+ {
+ "epoch": 0.9802757025413398,
+ "grad_norm": 0.7327549457550049,
+ "learning_rate": 0.00015171480368383964,
+ "loss": 1.2923,
+ "step": 980
+ },
+ {
+ "epoch": 0.9812759838704636,
+ "grad_norm": 0.5170425176620483,
+ "learning_rate": 0.00015162505635255287,
+ "loss": 1.3097,
+ "step": 981
+ },
+ {
+ "epoch": 0.9822762651995874,
+ "grad_norm": 0.47041526436805725,
+ "learning_rate": 0.00015153525229466555,
+ "loss": 1.3508,
+ "step": 982
+ },
+ {
+ "epoch": 0.9832765465287112,
+ "grad_norm": 0.4670693278312683,
+ "learning_rate": 0.00015144539160885613,
+ "loss": 1.3974,
+ "step": 983
+ },
+ {
+ "epoch": 0.984276827857835,
+ "grad_norm": 0.5745754837989807,
+ "learning_rate": 0.00015135547439386516,
+ "loss": 1.2977,
+ "step": 984
+ },
+ {
+ "epoch": 0.9852771091869589,
+ "grad_norm": 0.5845474004745483,
+ "learning_rate": 0.0001512655007484955,
+ "loss": 1.3384,
+ "step": 985
+ },
+ {
+ "epoch": 0.9862773905160827,
+ "grad_norm": 0.5627439618110657,
+ "learning_rate": 0.00015117547077161185,
+ "loss": 1.1756,
+ "step": 986
+ },
+ {
+ "epoch": 0.9872776718452064,
+ "grad_norm": 0.6411226987838745,
+ "learning_rate": 0.0001510853845621409,
+ "loss": 1.3441,
+ "step": 987
+ },
+ {
+ "epoch": 0.9882779531743303,
+ "grad_norm": 0.545659601688385,
+ "learning_rate": 0.00015099524221907107,
+ "loss": 1.3766,
+ "step": 988
+ },
+ {
+ "epoch": 0.9892782345034541,
+ "grad_norm": 0.5058498382568359,
+ "learning_rate": 0.0001509050438414525,
+ "loss": 1.3171,
+ "step": 989
+ },
+ {
+ "epoch": 0.9902785158325779,
+ "grad_norm": 0.6247567534446716,
+ "learning_rate": 0.00015081478952839693,
+ "loss": 1.2141,
+ "step": 990
+ },
+ {
+ "epoch": 0.9912787971617018,
+ "grad_norm": 0.5492308139801025,
+ "learning_rate": 0.00015072447937907753,
+ "loss": 1.1626,
+ "step": 991
+ },
+ {
+ "epoch": 0.9922790784908255,
+ "grad_norm": 0.4795534908771515,
+ "learning_rate": 0.00015063411349272877,
+ "loss": 1.218,
+ "step": 992
+ },
+ {
+ "epoch": 0.9932793598199494,
+ "grad_norm": 0.5527793169021606,
+ "learning_rate": 0.00015054369196864644,
+ "loss": 1.3816,
+ "step": 993
+ },
+ {
+ "epoch": 0.9942796411490732,
+ "grad_norm": 0.5297475457191467,
+ "learning_rate": 0.00015045321490618748,
+ "loss": 1.2515,
+ "step": 994
+ },
+ {
+ "epoch": 0.995279922478197,
+ "grad_norm": 0.518803596496582,
+ "learning_rate": 0.00015036268240476978,
+ "loss": 1.3631,
+ "step": 995
+ },
+ {
+ "epoch": 0.9962802038073209,
+ "grad_norm": 0.47196391224861145,
+ "learning_rate": 0.00015027209456387218,
+ "loss": 1.0932,
+ "step": 996
+ },
+ {
+ "epoch": 0.9972804851364446,
+ "grad_norm": 0.5369086861610413,
+ "learning_rate": 0.00015018145148303438,
+ "loss": 1.1181,
+ "step": 997
+ },
+ {
+ "epoch": 0.9982807664655684,
+ "grad_norm": 0.5940788388252258,
+ "learning_rate": 0.00015009075326185667,
+ "loss": 1.561,
+ "step": 998
+ },
+ {
+ "epoch": 0.9992810477946923,
+ "grad_norm": 0.5340734124183655,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 1.2909,
+ "step": 999
+ },
+ {
+ "epoch": 1.0002813291238162,
+ "grad_norm": 0.5133704543113708,
+ "learning_rate": 0.00014990919179718584,
+ "loss": 1.0441,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0012816104529398,
+ "grad_norm": 0.3812060058116913,
+ "learning_rate": 0.00014981832875319597,
+ "loss": 0.8215,
+ "step": 1001
+ },
+ {
+ "epoch": 1.0022818917820637,
+ "grad_norm": 0.40786364674568176,
+ "learning_rate": 0.00014972741096787242,
+ "loss": 0.8215,
+ "step": 1002
+ },
+ {
+ "epoch": 1.0032821731111876,
+ "grad_norm": 0.4328629672527313,
+ "learning_rate": 0.0001496364385411174,
+ "loss": 0.9506,
+ "step": 1003
+ },
+ {
+ "epoch": 1.0042824544403113,
+ "grad_norm": 0.4680945873260498,
+ "learning_rate": 0.0001495454115728932,
+ "loss": 0.8443,
+ "step": 1004
+ },
+ {
+ "epoch": 1.0052827357694352,
+ "grad_norm": 0.48512670397758484,
+ "learning_rate": 0.0001494543301632219,
+ "loss": 1.1143,
+ "step": 1005
+ },
+ {
+ "epoch": 1.006283017098559,
+ "grad_norm": 0.43949049711227417,
+ "learning_rate": 0.00014936319441218555,
+ "loss": 1.0257,
+ "step": 1006
+ },
+ {
+ "epoch": 1.0072832984276827,
+ "grad_norm": 0.5564325451850891,
+ "learning_rate": 0.0001492720044199259,
+ "loss": 0.967,
+ "step": 1007
+ },
+ {
+ "epoch": 1.0082835797568066,
+ "grad_norm": 0.47199952602386475,
+ "learning_rate": 0.0001491807602866442,
+ "loss": 1.0317,
+ "step": 1008
+ },
+ {
+ "epoch": 1.0092838610859305,
+ "grad_norm": 0.4625256657600403,
+ "learning_rate": 0.00014908946211260123,
+ "loss": 0.894,
+ "step": 1009
+ },
+ {
+ "epoch": 1.0102841424150542,
+ "grad_norm": 0.5081682801246643,
+ "learning_rate": 0.00014899810999811726,
+ "loss": 0.9647,
+ "step": 1010
+ },
+ {
+ "epoch": 1.011284423744178,
+ "grad_norm": 0.5240431427955627,
+ "learning_rate": 0.0001489067040435717,
+ "loss": 1.1076,
+ "step": 1011
+ },
+ {
+ "epoch": 1.012284705073302,
+ "grad_norm": 0.5996805429458618,
+ "learning_rate": 0.00014881524434940313,
+ "loss": 0.9063,
+ "step": 1012
+ },
+ {
+ "epoch": 1.0132849864024256,
+ "grad_norm": 0.4602286219596863,
+ "learning_rate": 0.0001487237310161093,
+ "loss": 0.8003,
+ "step": 1013
+ },
+ {
+ "epoch": 1.0142852677315495,
+ "grad_norm": 0.5298121571540833,
+ "learning_rate": 0.0001486321641442467,
+ "loss": 0.9616,
+ "step": 1014
+ },
+ {
+ "epoch": 1.0152855490606734,
+ "grad_norm": 0.47525477409362793,
+ "learning_rate": 0.00014854054383443081,
+ "loss": 1.0457,
+ "step": 1015
+ },
+ {
+ "epoch": 1.016285830389797,
+ "grad_norm": 0.5577285885810852,
+ "learning_rate": 0.00014844887018733582,
+ "loss": 0.8973,
+ "step": 1016
+ },
+ {
+ "epoch": 1.017286111718921,
+ "grad_norm": 0.5028079748153687,
+ "learning_rate": 0.00014835714330369446,
+ "loss": 1.0721,
+ "step": 1017
+ },
+ {
+ "epoch": 1.0182863930480448,
+ "grad_norm": 0.5401796102523804,
+ "learning_rate": 0.00014826536328429795,
+ "loss": 0.9595,
+ "step": 1018
+ },
+ {
+ "epoch": 1.0192866743771685,
+ "grad_norm": 0.4957962930202484,
+ "learning_rate": 0.000148173530229996,
+ "loss": 0.9871,
+ "step": 1019
+ },
+ {
+ "epoch": 1.0202869557062924,
+ "grad_norm": 0.4891825020313263,
+ "learning_rate": 0.00014808164424169647,
+ "loss": 0.9546,
+ "step": 1020
+ },
+ {
+ "epoch": 1.0212872370354162,
+ "grad_norm": 0.48703211545944214,
+ "learning_rate": 0.0001479897054203655,
+ "loss": 0.8863,
+ "step": 1021
+ },
+ {
+ "epoch": 1.0222875183645401,
+ "grad_norm": 0.5614656805992126,
+ "learning_rate": 0.00014789771386702717,
+ "loss": 0.9857,
+ "step": 1022
+ },
+ {
+ "epoch": 1.0232877996936638,
+ "grad_norm": 0.5903550982475281,
+ "learning_rate": 0.0001478056696827636,
+ "loss": 0.8347,
+ "step": 1023
+ },
+ {
+ "epoch": 1.0242880810227877,
+ "grad_norm": 0.47974926233291626,
+ "learning_rate": 0.0001477135729687147,
+ "loss": 1.0035,
+ "step": 1024
+ },
+ {
+ "epoch": 1.0252883623519116,
+ "grad_norm": 0.5049344897270203,
+ "learning_rate": 0.0001476214238260781,
+ "loss": 0.953,
+ "step": 1025
+ },
+ {
+ "epoch": 1.0262886436810352,
+ "grad_norm": 0.3981640636920929,
+ "learning_rate": 0.000147529222356109,
+ "loss": 0.7118,
+ "step": 1026
+ },
+ {
+ "epoch": 1.0272889250101591,
+ "grad_norm": 0.598785400390625,
+ "learning_rate": 0.0001474369686601202,
+ "loss": 0.9002,
+ "step": 1027
+ },
+ {
+ "epoch": 1.028289206339283,
+ "grad_norm": 0.5422918200492859,
+ "learning_rate": 0.0001473446628394818,
+ "loss": 1.192,
+ "step": 1028
+ },
+ {
+ "epoch": 1.0292894876684067,
+ "grad_norm": 0.592509925365448,
+ "learning_rate": 0.00014725230499562119,
+ "loss": 1.0989,
+ "step": 1029
+ },
+ {
+ "epoch": 1.0302897689975306,
+ "grad_norm": 0.5232793688774109,
+ "learning_rate": 0.00014715989523002296,
+ "loss": 1.0667,
+ "step": 1030
+ },
+ {
+ "epoch": 1.0312900503266544,
+ "grad_norm": 0.5362406373023987,
+ "learning_rate": 0.00014706743364422878,
+ "loss": 0.8933,
+ "step": 1031
+ },
+ {
+ "epoch": 1.032290331655778,
+ "grad_norm": 0.43486225605010986,
+ "learning_rate": 0.00014697492033983707,
+ "loss": 0.8525,
+ "step": 1032
+ },
+ {
+ "epoch": 1.033290612984902,
+ "grad_norm": 0.5187330842018127,
+ "learning_rate": 0.00014688235541850337,
+ "loss": 1.017,
+ "step": 1033
+ },
+ {
+ "epoch": 1.0342908943140259,
+ "grad_norm": 0.5081651210784912,
+ "learning_rate": 0.0001467897389819397,
+ "loss": 1.0135,
+ "step": 1034
+ },
+ {
+ "epoch": 1.0352911756431495,
+ "grad_norm": 0.49661391973495483,
+ "learning_rate": 0.00014669707113191483,
+ "loss": 0.8711,
+ "step": 1035
+ },
+ {
+ "epoch": 1.0362914569722734,
+ "grad_norm": 0.4899054169654846,
+ "learning_rate": 0.0001466043519702539,
+ "loss": 0.9924,
+ "step": 1036
+ },
+ {
+ "epoch": 1.0372917383013973,
+ "grad_norm": 0.47787439823150635,
+ "learning_rate": 0.00014651158159883855,
+ "loss": 0.9238,
+ "step": 1037
+ },
+ {
+ "epoch": 1.038292019630521,
+ "grad_norm": 0.509600818157196,
+ "learning_rate": 0.0001464187601196066,
+ "loss": 0.8854,
+ "step": 1038
+ },
+ {
+ "epoch": 1.0392923009596449,
+ "grad_norm": 0.3907245397567749,
+ "learning_rate": 0.00014632588763455212,
+ "loss": 0.8911,
+ "step": 1039
+ },
+ {
+ "epoch": 1.0402925822887688,
+ "grad_norm": 0.4939952492713928,
+ "learning_rate": 0.00014623296424572517,
+ "loss": 0.9069,
+ "step": 1040
+ },
+ {
+ "epoch": 1.0412928636178926,
+ "grad_norm": 0.4680919945240021,
+ "learning_rate": 0.00014613999005523174,
+ "loss": 0.9361,
+ "step": 1041
+ },
+ {
+ "epoch": 1.0422931449470163,
+ "grad_norm": 0.4871543347835541,
+ "learning_rate": 0.00014604696516523361,
+ "loss": 0.9268,
+ "step": 1042
+ },
+ {
+ "epoch": 1.0432934262761402,
+ "grad_norm": 0.5115481615066528,
+ "learning_rate": 0.00014595388967794835,
+ "loss": 0.9555,
+ "step": 1043
+ },
+ {
+ "epoch": 1.044293707605264,
+ "grad_norm": 0.5923699140548706,
+ "learning_rate": 0.00014586076369564908,
+ "loss": 1.0122,
+ "step": 1044
+ },
+ {
+ "epoch": 1.0452939889343877,
+ "grad_norm": 0.491161048412323,
+ "learning_rate": 0.00014576758732066442,
+ "loss": 0.9805,
+ "step": 1045
+ },
+ {
+ "epoch": 1.0462942702635116,
+ "grad_norm": 0.462168425321579,
+ "learning_rate": 0.00014567436065537835,
+ "loss": 0.9213,
+ "step": 1046
+ },
+ {
+ "epoch": 1.0472945515926355,
+ "grad_norm": 0.5082408785820007,
+ "learning_rate": 0.00014558108380223012,
+ "loss": 0.9073,
+ "step": 1047
+ },
+ {
+ "epoch": 1.0482948329217592,
+ "grad_norm": 0.6131752133369446,
+ "learning_rate": 0.00014548775686371412,
+ "loss": 0.9156,
+ "step": 1048
+ },
+ {
+ "epoch": 1.049295114250883,
+ "grad_norm": 0.6133660674095154,
+ "learning_rate": 0.00014539437994237977,
+ "loss": 1.2011,
+ "step": 1049
+ },
+ {
+ "epoch": 1.050295395580007,
+ "grad_norm": 0.542412519454956,
+ "learning_rate": 0.00014530095314083143,
+ "loss": 1.1075,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0512956769091306,
+ "grad_norm": 0.5367622971534729,
+ "learning_rate": 0.00014520747656172824,
+ "loss": 1.0783,
+ "step": 1051
+ },
+ {
+ "epoch": 1.0522959582382545,
+ "grad_norm": 0.5243119597434998,
+ "learning_rate": 0.00014511395030778406,
+ "loss": 1.0865,
+ "step": 1052
+ },
+ {
+ "epoch": 1.0532962395673784,
+ "grad_norm": 0.5611020922660828,
+ "learning_rate": 0.00014502037448176734,
+ "loss": 0.9613,
+ "step": 1053
+ },
+ {
+ "epoch": 1.054296520896502,
+ "grad_norm": 0.506432294845581,
+ "learning_rate": 0.000144926749186501,
+ "loss": 1.1364,
+ "step": 1054
+ },
+ {
+ "epoch": 1.055296802225626,
+ "grad_norm": 0.5270103812217712,
+ "learning_rate": 0.00014483307452486227,
+ "loss": 1.042,
+ "step": 1055
+ },
+ {
+ "epoch": 1.0562970835547498,
+ "grad_norm": 0.5376967191696167,
+ "learning_rate": 0.0001447393505997827,
+ "loss": 0.9563,
+ "step": 1056
+ },
+ {
+ "epoch": 1.0572973648838735,
+ "grad_norm": 0.4821127653121948,
+ "learning_rate": 0.00014464557751424793,
+ "loss": 0.9241,
+ "step": 1057
+ },
+ {
+ "epoch": 1.0582976462129974,
+ "grad_norm": 0.6197866201400757,
+ "learning_rate": 0.00014455175537129758,
+ "loss": 1.0489,
+ "step": 1058
+ },
+ {
+ "epoch": 1.0592979275421213,
+ "grad_norm": 0.42820343375205994,
+ "learning_rate": 0.00014445788427402528,
+ "loss": 0.7755,
+ "step": 1059
+ },
+ {
+ "epoch": 1.0602982088712452,
+ "grad_norm": 0.49635690450668335,
+ "learning_rate": 0.00014436396432557835,
+ "loss": 0.8485,
+ "step": 1060
+ },
+ {
+ "epoch": 1.0612984902003688,
+ "grad_norm": 0.5529823899269104,
+ "learning_rate": 0.00014426999562915782,
+ "loss": 0.9589,
+ "step": 1061
+ },
+ {
+ "epoch": 1.0622987715294927,
+ "grad_norm": 0.5504932403564453,
+ "learning_rate": 0.00014417597828801832,
+ "loss": 0.9048,
+ "step": 1062
+ },
+ {
+ "epoch": 1.0632990528586166,
+ "grad_norm": 0.5755835175514221,
+ "learning_rate": 0.0001440819124054679,
+ "loss": 0.9542,
+ "step": 1063
+ },
+ {
+ "epoch": 1.0642993341877403,
+ "grad_norm": 0.4767759144306183,
+ "learning_rate": 0.00014398779808486793,
+ "loss": 0.9174,
+ "step": 1064
+ },
+ {
+ "epoch": 1.0652996155168641,
+ "grad_norm": 0.5343469381332397,
+ "learning_rate": 0.00014389363542963306,
+ "loss": 0.8493,
+ "step": 1065
+ },
+ {
+ "epoch": 1.066299896845988,
+ "grad_norm": 0.48161643743515015,
+ "learning_rate": 0.000143799424543231,
+ "loss": 0.8218,
+ "step": 1066
+ },
+ {
+ "epoch": 1.0673001781751117,
+ "grad_norm": 0.4958563446998596,
+ "learning_rate": 0.0001437051655291825,
+ "loss": 0.9849,
+ "step": 1067
+ },
+ {
+ "epoch": 1.0683004595042356,
+ "grad_norm": 0.5286628007888794,
+ "learning_rate": 0.0001436108584910611,
+ "loss": 0.8935,
+ "step": 1068
+ },
+ {
+ "epoch": 1.0693007408333595,
+ "grad_norm": 0.6096596121788025,
+ "learning_rate": 0.0001435165035324933,
+ "loss": 1.0577,
+ "step": 1069
+ },
+ {
+ "epoch": 1.0703010221624831,
+ "grad_norm": 0.4895448088645935,
+ "learning_rate": 0.000143422100757158,
+ "loss": 0.865,
+ "step": 1070
+ },
+ {
+ "epoch": 1.071301303491607,
+ "grad_norm": 0.5186201930046082,
+ "learning_rate": 0.00014332765026878687,
+ "loss": 0.8414,
+ "step": 1071
+ },
+ {
+ "epoch": 1.072301584820731,
+ "grad_norm": 0.5639254450798035,
+ "learning_rate": 0.0001432331521711639,
+ "loss": 0.9401,
+ "step": 1072
+ },
+ {
+ "epoch": 1.0733018661498546,
+ "grad_norm": 0.48865774273872375,
+ "learning_rate": 0.00014313860656812536,
+ "loss": 0.7894,
+ "step": 1073
+ },
+ {
+ "epoch": 1.0743021474789785,
+ "grad_norm": 0.4796544313430786,
+ "learning_rate": 0.00014304401356355983,
+ "loss": 0.8153,
+ "step": 1074
+ },
+ {
+ "epoch": 1.0753024288081023,
+ "grad_norm": 0.5578910708427429,
+ "learning_rate": 0.00014294937326140788,
+ "loss": 1.1675,
+ "step": 1075
+ },
+ {
+ "epoch": 1.076302710137226,
+ "grad_norm": 0.5607575178146362,
+ "learning_rate": 0.00014285468576566207,
+ "loss": 0.9133,
+ "step": 1076
+ },
+ {
+ "epoch": 1.07730299146635,
+ "grad_norm": 0.48808708786964417,
+ "learning_rate": 0.00014275995118036693,
+ "loss": 0.8884,
+ "step": 1077
+ },
+ {
+ "epoch": 1.0783032727954738,
+ "grad_norm": 0.4981604814529419,
+ "learning_rate": 0.00014266516960961852,
+ "loss": 0.9235,
+ "step": 1078
+ },
+ {
+ "epoch": 1.0793035541245974,
+ "grad_norm": 0.6323955655097961,
+ "learning_rate": 0.00014257034115756472,
+ "loss": 1.1617,
+ "step": 1079
+ },
+ {
+ "epoch": 1.0803038354537213,
+ "grad_norm": 0.5465244650840759,
+ "learning_rate": 0.0001424754659284048,
+ "loss": 1.0126,
+ "step": 1080
+ },
+ {
+ "epoch": 1.0813041167828452,
+ "grad_norm": 0.504200279712677,
+ "learning_rate": 0.0001423805440263895,
+ "loss": 1.0069,
+ "step": 1081
+ },
+ {
+ "epoch": 1.0823043981119689,
+ "grad_norm": 0.8698700070381165,
+ "learning_rate": 0.0001422855755558208,
+ "loss": 0.9653,
+ "step": 1082
+ },
+ {
+ "epoch": 1.0833046794410928,
+ "grad_norm": 0.41991496086120605,
+ "learning_rate": 0.00014219056062105193,
+ "loss": 1.089,
+ "step": 1083
+ },
+ {
+ "epoch": 1.0843049607702167,
+ "grad_norm": 0.5334717035293579,
+ "learning_rate": 0.0001420954993264871,
+ "loss": 1.0137,
+ "step": 1084
+ },
+ {
+ "epoch": 1.0853052420993405,
+ "grad_norm": 0.5418859124183655,
+ "learning_rate": 0.00014200039177658145,
+ "loss": 0.9302,
+ "step": 1085
+ },
+ {
+ "epoch": 1.0863055234284642,
+ "grad_norm": 0.515819251537323,
+ "learning_rate": 0.000141905238075841,
+ "loss": 1.0703,
+ "step": 1086
+ },
+ {
+ "epoch": 1.087305804757588,
+ "grad_norm": 0.43046239018440247,
+ "learning_rate": 0.00014181003832882248,
+ "loss": 1.0722,
+ "step": 1087
+ },
+ {
+ "epoch": 1.088306086086712,
+ "grad_norm": 0.6555958390235901,
+ "learning_rate": 0.00014171479264013311,
+ "loss": 0.806,
+ "step": 1088
+ },
+ {
+ "epoch": 1.0893063674158356,
+ "grad_norm": 0.5608332753181458,
+ "learning_rate": 0.00014161950111443077,
+ "loss": 0.9925,
+ "step": 1089
+ },
+ {
+ "epoch": 1.0903066487449595,
+ "grad_norm": 0.5866970419883728,
+ "learning_rate": 0.00014152416385642357,
+ "loss": 0.9278,
+ "step": 1090
+ },
+ {
+ "epoch": 1.0913069300740834,
+ "grad_norm": 0.4913788437843323,
+ "learning_rate": 0.00014142878097086995,
+ "loss": 0.7394,
+ "step": 1091
+ },
+ {
+ "epoch": 1.092307211403207,
+ "grad_norm": 0.4942512512207031,
+ "learning_rate": 0.0001413333525625784,
+ "loss": 0.8891,
+ "step": 1092
+ },
+ {
+ "epoch": 1.093307492732331,
+ "grad_norm": 0.5537131428718567,
+ "learning_rate": 0.00014123787873640754,
+ "loss": 0.9632,
+ "step": 1093
+ },
+ {
+ "epoch": 1.0943077740614549,
+ "grad_norm": 0.49271076917648315,
+ "learning_rate": 0.00014114235959726575,
+ "loss": 0.8708,
+ "step": 1094
+ },
+ {
+ "epoch": 1.0953080553905785,
+ "grad_norm": 0.448188841342926,
+ "learning_rate": 0.0001410467952501114,
+ "loss": 0.9727,
+ "step": 1095
+ },
+ {
+ "epoch": 1.0963083367197024,
+ "grad_norm": 0.4975283741950989,
+ "learning_rate": 0.00014095118579995235,
+ "loss": 0.9971,
+ "step": 1096
+ },
+ {
+ "epoch": 1.0973086180488263,
+ "grad_norm": 0.46382221579551697,
+ "learning_rate": 0.0001408555313518461,
+ "loss": 0.8853,
+ "step": 1097
+ },
+ {
+ "epoch": 1.09830889937795,
+ "grad_norm": 0.5071414113044739,
+ "learning_rate": 0.00014075983201089964,
+ "loss": 0.7723,
+ "step": 1098
+ },
+ {
+ "epoch": 1.0993091807070738,
+ "grad_norm": 0.41700050234794617,
+ "learning_rate": 0.0001406640878822692,
+ "loss": 0.7892,
+ "step": 1099
+ },
+ {
+ "epoch": 1.1003094620361977,
+ "grad_norm": 0.497175395488739,
+ "learning_rate": 0.00014056829907116024,
+ "loss": 0.9791,
+ "step": 1100
+ },
+ {
+ "epoch": 1.1013097433653214,
+ "grad_norm": 0.4512806236743927,
+ "learning_rate": 0.00014047246568282736,
+ "loss": 0.9878,
+ "step": 1101
+ },
+ {
+ "epoch": 1.1023100246944453,
+ "grad_norm": 0.5804361701011658,
+ "learning_rate": 0.00014037658782257414,
+ "loss": 1.1583,
+ "step": 1102
+ },
+ {
+ "epoch": 1.1033103060235692,
+ "grad_norm": 0.5334234237670898,
+ "learning_rate": 0.00014028066559575302,
+ "loss": 1.0705,
+ "step": 1103
+ },
+ {
+ "epoch": 1.104310587352693,
+ "grad_norm": 0.4683452844619751,
+ "learning_rate": 0.00014018469910776513,
+ "loss": 0.8608,
+ "step": 1104
+ },
+ {
+ "epoch": 1.1053108686818167,
+ "grad_norm": 0.5595771074295044,
+ "learning_rate": 0.0001400886884640603,
+ "loss": 1.0804,
+ "step": 1105
+ },
+ {
+ "epoch": 1.1063111500109406,
+ "grad_norm": 0.45048126578330994,
+ "learning_rate": 0.00013999263377013693,
+ "loss": 0.7782,
+ "step": 1106
+ },
+ {
+ "epoch": 1.1073114313400645,
+ "grad_norm": 0.4472745954990387,
+ "learning_rate": 0.00013989653513154165,
+ "loss": 0.8599,
+ "step": 1107
+ },
+ {
+ "epoch": 1.1083117126691882,
+ "grad_norm": 0.5168829560279846,
+ "learning_rate": 0.00013980039265386955,
+ "loss": 0.9984,
+ "step": 1108
+ },
+ {
+ "epoch": 1.109311993998312,
+ "grad_norm": 0.5712297558784485,
+ "learning_rate": 0.00013970420644276383,
+ "loss": 0.957,
+ "step": 1109
+ },
+ {
+ "epoch": 1.110312275327436,
+ "grad_norm": 0.5360589027404785,
+ "learning_rate": 0.0001396079766039157,
+ "loss": 1.0957,
+ "step": 1110
+ },
+ {
+ "epoch": 1.1113125566565596,
+ "grad_norm": 0.49815621972084045,
+ "learning_rate": 0.00013951170324306435,
+ "loss": 1.1143,
+ "step": 1111
+ },
+ {
+ "epoch": 1.1123128379856835,
+ "grad_norm": 0.45044735074043274,
+ "learning_rate": 0.00013941538646599687,
+ "loss": 0.8463,
+ "step": 1112
+ },
+ {
+ "epoch": 1.1133131193148074,
+ "grad_norm": 0.5086628198623657,
+ "learning_rate": 0.0001393190263785479,
+ "loss": 0.9061,
+ "step": 1113
+ },
+ {
+ "epoch": 1.114313400643931,
+ "grad_norm": 0.4669632315635681,
+ "learning_rate": 0.0001392226230865998,
+ "loss": 0.7891,
+ "step": 1114
+ },
+ {
+ "epoch": 1.115313681973055,
+ "grad_norm": 0.43681180477142334,
+ "learning_rate": 0.0001391261766960823,
+ "loss": 0.7687,
+ "step": 1115
+ },
+ {
+ "epoch": 1.1163139633021788,
+ "grad_norm": 0.47354501485824585,
+ "learning_rate": 0.00013902968731297255,
+ "loss": 1.0181,
+ "step": 1116
+ },
+ {
+ "epoch": 1.1173142446313025,
+ "grad_norm": 0.5224591493606567,
+ "learning_rate": 0.00013893315504329498,
+ "loss": 0.9072,
+ "step": 1117
+ },
+ {
+ "epoch": 1.1183145259604264,
+ "grad_norm": 0.5648715496063232,
+ "learning_rate": 0.00013883657999312109,
+ "loss": 1.0256,
+ "step": 1118
+ },
+ {
+ "epoch": 1.1193148072895502,
+ "grad_norm": 0.4603082239627838,
+ "learning_rate": 0.00013873996226856933,
+ "loss": 0.9129,
+ "step": 1119
+ },
+ {
+ "epoch": 1.120315088618674,
+ "grad_norm": 0.48259446024894714,
+ "learning_rate": 0.00013864330197580513,
+ "loss": 0.8335,
+ "step": 1120
+ },
+ {
+ "epoch": 1.1213153699477978,
+ "grad_norm": 0.5239295363426208,
+ "learning_rate": 0.0001385465992210407,
+ "loss": 1.1409,
+ "step": 1121
+ },
+ {
+ "epoch": 1.1223156512769217,
+ "grad_norm": 0.5242553949356079,
+ "learning_rate": 0.00013844985411053492,
+ "loss": 0.9542,
+ "step": 1122
+ },
+ {
+ "epoch": 1.1233159326060456,
+ "grad_norm": 0.5396201014518738,
+ "learning_rate": 0.00013835306675059308,
+ "loss": 1.0786,
+ "step": 1123
+ },
+ {
+ "epoch": 1.1243162139351692,
+ "grad_norm": 1.818426251411438,
+ "learning_rate": 0.00013825623724756704,
+ "loss": 0.9336,
+ "step": 1124
+ },
+ {
+ "epoch": 1.1253164952642931,
+ "grad_norm": 0.5364382863044739,
+ "learning_rate": 0.00013815936570785487,
+ "loss": 0.8096,
+ "step": 1125
+ },
+ {
+ "epoch": 1.1263167765934168,
+ "grad_norm": 0.47344619035720825,
+ "learning_rate": 0.00013806245223790088,
+ "loss": 0.8777,
+ "step": 1126
+ },
+ {
+ "epoch": 1.1273170579225407,
+ "grad_norm": 0.48119789361953735,
+ "learning_rate": 0.0001379654969441955,
+ "loss": 0.9965,
+ "step": 1127
+ },
+ {
+ "epoch": 1.1283173392516646,
+ "grad_norm": 0.5970126390457153,
+ "learning_rate": 0.000137868499933275,
+ "loss": 1.1389,
+ "step": 1128
+ },
+ {
+ "epoch": 1.1293176205807884,
+ "grad_norm": 0.5217893719673157,
+ "learning_rate": 0.00013777146131172162,
+ "loss": 1.1361,
+ "step": 1129
+ },
+ {
+ "epoch": 1.130317901909912,
+ "grad_norm": 0.4322263300418854,
+ "learning_rate": 0.00013767438118616318,
+ "loss": 0.8632,
+ "step": 1130
+ },
+ {
+ "epoch": 1.131318183239036,
+ "grad_norm": 0.49836596846580505,
+ "learning_rate": 0.00013757725966327322,
+ "loss": 0.9594,
+ "step": 1131
+ },
+ {
+ "epoch": 1.1323184645681599,
+ "grad_norm": 0.5220472812652588,
+ "learning_rate": 0.00013748009684977073,
+ "loss": 1.0783,
+ "step": 1132
+ },
+ {
+ "epoch": 1.1333187458972835,
+ "grad_norm": 0.5030301809310913,
+ "learning_rate": 0.0001373828928524201,
+ "loss": 0.9482,
+ "step": 1133
+ },
+ {
+ "epoch": 1.1343190272264074,
+ "grad_norm": 0.5477299094200134,
+ "learning_rate": 0.00013728564777803088,
+ "loss": 1.1119,
+ "step": 1134
+ },
+ {
+ "epoch": 1.1353193085555313,
+ "grad_norm": 0.5505563020706177,
+ "learning_rate": 0.00013718836173345783,
+ "loss": 1.0315,
+ "step": 1135
+ },
+ {
+ "epoch": 1.136319589884655,
+ "grad_norm": 0.5921071171760559,
+ "learning_rate": 0.00013709103482560078,
+ "loss": 0.98,
+ "step": 1136
+ },
+ {
+ "epoch": 1.1373198712137789,
+ "grad_norm": 0.4483082890510559,
+ "learning_rate": 0.00013699366716140435,
+ "loss": 0.9203,
+ "step": 1137
+ },
+ {
+ "epoch": 1.1383201525429028,
+ "grad_norm": 0.4304388165473938,
+ "learning_rate": 0.00013689625884785798,
+ "loss": 0.824,
+ "step": 1138
+ },
+ {
+ "epoch": 1.1393204338720264,
+ "grad_norm": 0.5273844003677368,
+ "learning_rate": 0.00013679880999199583,
+ "loss": 1.0061,
+ "step": 1139
+ },
+ {
+ "epoch": 1.1403207152011503,
+ "grad_norm": 0.5016499161720276,
+ "learning_rate": 0.00013670132070089653,
+ "loss": 0.8692,
+ "step": 1140
+ },
+ {
+ "epoch": 1.1413209965302742,
+ "grad_norm": 0.5045731067657471,
+ "learning_rate": 0.00013660379108168324,
+ "loss": 0.958,
+ "step": 1141
+ },
+ {
+ "epoch": 1.142321277859398,
+ "grad_norm": 0.484275221824646,
+ "learning_rate": 0.00013650622124152334,
+ "loss": 0.8589,
+ "step": 1142
+ },
+ {
+ "epoch": 1.1433215591885217,
+ "grad_norm": 0.6210789680480957,
+ "learning_rate": 0.0001364086112876284,
+ "loss": 0.931,
+ "step": 1143
+ },
+ {
+ "epoch": 1.1443218405176456,
+ "grad_norm": 0.59291011095047,
+ "learning_rate": 0.00013631096132725413,
+ "loss": 0.9706,
+ "step": 1144
+ },
+ {
+ "epoch": 1.1453221218467693,
+ "grad_norm": 0.48909759521484375,
+ "learning_rate": 0.00013621327146770025,
+ "loss": 0.9696,
+ "step": 1145
+ },
+ {
+ "epoch": 1.1463224031758932,
+ "grad_norm": 0.5022495985031128,
+ "learning_rate": 0.00013611554181631013,
+ "loss": 0.9349,
+ "step": 1146
+ },
+ {
+ "epoch": 1.147322684505017,
+ "grad_norm": 0.6155623197555542,
+ "learning_rate": 0.00013601777248047105,
+ "loss": 0.9161,
+ "step": 1147
+ },
+ {
+ "epoch": 1.148322965834141,
+ "grad_norm": 0.49372079968452454,
+ "learning_rate": 0.0001359199635676138,
+ "loss": 0.8598,
+ "step": 1148
+ },
+ {
+ "epoch": 1.1493232471632646,
+ "grad_norm": 0.504294753074646,
+ "learning_rate": 0.00013582211518521273,
+ "loss": 0.9334,
+ "step": 1149
+ },
+ {
+ "epoch": 1.1503235284923885,
+ "grad_norm": 0.44594088196754456,
+ "learning_rate": 0.00013572422744078551,
+ "loss": 1.0443,
+ "step": 1150
+ },
+ {
+ "epoch": 1.1513238098215124,
+ "grad_norm": 0.4689579904079437,
+ "learning_rate": 0.00013562630044189304,
+ "loss": 0.9192,
+ "step": 1151
+ },
+ {
+ "epoch": 1.152324091150636,
+ "grad_norm": 0.49370667338371277,
+ "learning_rate": 0.00013552833429613938,
+ "loss": 0.8638,
+ "step": 1152
+ },
+ {
+ "epoch": 1.15332437247976,
+ "grad_norm": 0.4459637403488159,
+ "learning_rate": 0.0001354303291111716,
+ "loss": 0.8719,
+ "step": 1153
+ },
+ {
+ "epoch": 1.1543246538088838,
+ "grad_norm": 0.41995370388031006,
+ "learning_rate": 0.0001353322849946797,
+ "loss": 0.9429,
+ "step": 1154
+ },
+ {
+ "epoch": 1.1553249351380075,
+ "grad_norm": 0.5358927249908447,
+ "learning_rate": 0.00013523420205439646,
+ "loss": 1.0724,
+ "step": 1155
+ },
+ {
+ "epoch": 1.1563252164671314,
+ "grad_norm": 0.48797738552093506,
+ "learning_rate": 0.0001351360803980972,
+ "loss": 1.0191,
+ "step": 1156
+ },
+ {
+ "epoch": 1.1573254977962553,
+ "grad_norm": 0.46079760789871216,
+ "learning_rate": 0.00013503792013359997,
+ "loss": 0.8731,
+ "step": 1157
+ },
+ {
+ "epoch": 1.158325779125379,
+ "grad_norm": 0.5278632044792175,
+ "learning_rate": 0.00013493972136876509,
+ "loss": 1.0689,
+ "step": 1158
+ },
+ {
+ "epoch": 1.1593260604545028,
+ "grad_norm": 0.6085927486419678,
+ "learning_rate": 0.00013484148421149527,
+ "loss": 1.0228,
+ "step": 1159
+ },
+ {
+ "epoch": 1.1603263417836267,
+ "grad_norm": 0.49424564838409424,
+ "learning_rate": 0.0001347432087697354,
+ "loss": 0.9622,
+ "step": 1160
+ },
+ {
+ "epoch": 1.1613266231127504,
+ "grad_norm": 0.4577535092830658,
+ "learning_rate": 0.00013464489515147238,
+ "loss": 0.795,
+ "step": 1161
+ },
+ {
+ "epoch": 1.1623269044418743,
+ "grad_norm": 0.5331981778144836,
+ "learning_rate": 0.0001345465434647351,
+ "loss": 1.2866,
+ "step": 1162
+ },
+ {
+ "epoch": 1.1633271857709981,
+ "grad_norm": 0.4657655954360962,
+ "learning_rate": 0.00013444815381759425,
+ "loss": 0.8171,
+ "step": 1163
+ },
+ {
+ "epoch": 1.1643274671001218,
+ "grad_norm": 0.44027647376060486,
+ "learning_rate": 0.00013434972631816235,
+ "loss": 0.9448,
+ "step": 1164
+ },
+ {
+ "epoch": 1.1653277484292457,
+ "grad_norm": 1.996617317199707,
+ "learning_rate": 0.0001342512610745933,
+ "loss": 0.8706,
+ "step": 1165
+ },
+ {
+ "epoch": 1.1663280297583696,
+ "grad_norm": 0.4826609790325165,
+ "learning_rate": 0.0001341527581950827,
+ "loss": 1.1075,
+ "step": 1166
+ },
+ {
+ "epoch": 1.1673283110874935,
+ "grad_norm": 0.4908469617366791,
+ "learning_rate": 0.00013405421778786737,
+ "loss": 0.835,
+ "step": 1167
+ },
+ {
+ "epoch": 1.1683285924166171,
+ "grad_norm": 0.5113404989242554,
+ "learning_rate": 0.00013395563996122537,
+ "loss": 0.8437,
+ "step": 1168
+ },
+ {
+ "epoch": 1.169328873745741,
+ "grad_norm": 0.5029433369636536,
+ "learning_rate": 0.00013385702482347593,
+ "loss": 1.1188,
+ "step": 1169
+ },
+ {
+ "epoch": 1.170329155074865,
+ "grad_norm": 0.4739987552165985,
+ "learning_rate": 0.00013375837248297926,
+ "loss": 0.9829,
+ "step": 1170
+ },
+ {
+ "epoch": 1.1713294364039886,
+ "grad_norm": 0.5853392481803894,
+ "learning_rate": 0.0001336596830481364,
+ "loss": 1.0384,
+ "step": 1171
+ },
+ {
+ "epoch": 1.1723297177331125,
+ "grad_norm": 0.5038638710975647,
+ "learning_rate": 0.0001335609566273892,
+ "loss": 0.9389,
+ "step": 1172
+ },
+ {
+ "epoch": 1.1733299990622363,
+ "grad_norm": 0.4367244243621826,
+ "learning_rate": 0.00013346219332922016,
+ "loss": 0.8182,
+ "step": 1173
+ },
+ {
+ "epoch": 1.17433028039136,
+ "grad_norm": 0.4453211724758148,
+ "learning_rate": 0.00013336339326215228,
+ "loss": 0.9289,
+ "step": 1174
+ },
+ {
+ "epoch": 1.175330561720484,
+ "grad_norm": 0.49941959977149963,
+ "learning_rate": 0.00013326455653474897,
+ "loss": 1.1277,
+ "step": 1175
+ },
+ {
+ "epoch": 1.1763308430496078,
+ "grad_norm": 0.553996205329895,
+ "learning_rate": 0.00013316568325561393,
+ "loss": 0.8582,
+ "step": 1176
+ },
+ {
+ "epoch": 1.1773311243787314,
+ "grad_norm": 0.5424408316612244,
+ "learning_rate": 0.00013306677353339098,
+ "loss": 1.0046,
+ "step": 1177
+ },
+ {
+ "epoch": 1.1783314057078553,
+ "grad_norm": 0.4373432695865631,
+ "learning_rate": 0.000132967827476764,
+ "loss": 0.9554,
+ "step": 1178
+ },
+ {
+ "epoch": 1.1793316870369792,
+ "grad_norm": 0.4744022786617279,
+ "learning_rate": 0.0001328688451944569,
+ "loss": 0.7784,
+ "step": 1179
+ },
+ {
+ "epoch": 1.1803319683661029,
+ "grad_norm": 0.5251059532165527,
+ "learning_rate": 0.00013276982679523322,
+ "loss": 0.8857,
+ "step": 1180
+ },
+ {
+ "epoch": 1.1813322496952268,
+ "grad_norm": 0.5108295679092407,
+ "learning_rate": 0.00013267077238789633,
+ "loss": 1.0711,
+ "step": 1181
+ },
+ {
+ "epoch": 1.1823325310243507,
+ "grad_norm": 0.49973955750465393,
+ "learning_rate": 0.00013257168208128908,
+ "loss": 1.0047,
+ "step": 1182
+ },
+ {
+ "epoch": 1.1833328123534743,
+ "grad_norm": 0.5143113732337952,
+ "learning_rate": 0.00013247255598429378,
+ "loss": 0.9294,
+ "step": 1183
+ },
+ {
+ "epoch": 1.1843330936825982,
+ "grad_norm": 0.5185163617134094,
+ "learning_rate": 0.00013237339420583212,
+ "loss": 0.9491,
+ "step": 1184
+ },
+ {
+ "epoch": 1.185333375011722,
+ "grad_norm": 0.49349021911621094,
+ "learning_rate": 0.00013227419685486492,
+ "loss": 0.812,
+ "step": 1185
+ },
+ {
+ "epoch": 1.186333656340846,
+ "grad_norm": 0.5210988521575928,
+ "learning_rate": 0.00013217496404039218,
+ "loss": 1.1228,
+ "step": 1186
+ },
+ {
+ "epoch": 1.1873339376699696,
+ "grad_norm": 0.46139585971832275,
+ "learning_rate": 0.0001320756958714528,
+ "loss": 0.7623,
+ "step": 1187
+ },
+ {
+ "epoch": 1.1883342189990935,
+ "grad_norm": 0.5365749597549438,
+ "learning_rate": 0.00013197639245712454,
+ "loss": 1.0785,
+ "step": 1188
+ },
+ {
+ "epoch": 1.1893345003282172,
+ "grad_norm": 0.4624418616294861,
+ "learning_rate": 0.00013187705390652388,
+ "loss": 1.0245,
+ "step": 1189
+ },
+ {
+ "epoch": 1.190334781657341,
+ "grad_norm": 0.4919735789299011,
+ "learning_rate": 0.00013177768032880593,
+ "loss": 0.9078,
+ "step": 1190
+ },
+ {
+ "epoch": 1.191335062986465,
+ "grad_norm": 0.5049088597297668,
+ "learning_rate": 0.0001316782718331643,
+ "loss": 0.8884,
+ "step": 1191
+ },
+ {
+ "epoch": 1.1923353443155889,
+ "grad_norm": 0.47496137022972107,
+ "learning_rate": 0.0001315788285288309,
+ "loss": 0.9414,
+ "step": 1192
+ },
+ {
+ "epoch": 1.1933356256447125,
+ "grad_norm": 0.4913059175014496,
+ "learning_rate": 0.00013147935052507597,
+ "loss": 0.8762,
+ "step": 1193
+ },
+ {
+ "epoch": 1.1943359069738364,
+ "grad_norm": 0.5643580555915833,
+ "learning_rate": 0.00013137983793120786,
+ "loss": 0.9556,
+ "step": 1194
+ },
+ {
+ "epoch": 1.1953361883029603,
+ "grad_norm": 0.5032216310501099,
+ "learning_rate": 0.0001312802908565729,
+ "loss": 1.1547,
+ "step": 1195
+ },
+ {
+ "epoch": 1.196336469632084,
+ "grad_norm": 0.5721387267112732,
+ "learning_rate": 0.0001311807094105553,
+ "loss": 0.97,
+ "step": 1196
+ },
+ {
+ "epoch": 1.1973367509612078,
+ "grad_norm": 0.47524675726890564,
+ "learning_rate": 0.00013108109370257712,
+ "loss": 0.9953,
+ "step": 1197
+ },
+ {
+ "epoch": 1.1983370322903317,
+ "grad_norm": 0.5769131183624268,
+ "learning_rate": 0.00013098144384209796,
+ "loss": 1.0578,
+ "step": 1198
+ },
+ {
+ "epoch": 1.1993373136194554,
+ "grad_norm": 0.4861721694469452,
+ "learning_rate": 0.000130881759938615,
+ "loss": 0.7542,
+ "step": 1199
+ },
+ {
+ "epoch": 1.2003375949485793,
+ "grad_norm": 0.4798511266708374,
+ "learning_rate": 0.00013078204210166278,
+ "loss": 0.9024,
+ "step": 1200
+ },
+ {
+ "epoch": 1.2013378762777032,
+ "grad_norm": 0.4447210729122162,
+ "learning_rate": 0.00013068229044081324,
+ "loss": 0.9703,
+ "step": 1201
+ },
+ {
+ "epoch": 1.2023381576068268,
+ "grad_norm": 0.5221365690231323,
+ "learning_rate": 0.0001305825050656754,
+ "loss": 1.0575,
+ "step": 1202
+ },
+ {
+ "epoch": 1.2033384389359507,
+ "grad_norm": 0.44786536693573,
+ "learning_rate": 0.00013048268608589533,
+ "loss": 0.9047,
+ "step": 1203
+ },
+ {
+ "epoch": 1.2043387202650746,
+ "grad_norm": 0.44534093141555786,
+ "learning_rate": 0.00013038283361115603,
+ "loss": 0.9156,
+ "step": 1204
+ },
+ {
+ "epoch": 1.2053390015941985,
+ "grad_norm": 0.5345563292503357,
+ "learning_rate": 0.0001302829477511773,
+ "loss": 0.9933,
+ "step": 1205
+ },
+ {
+ "epoch": 1.2063392829233222,
+ "grad_norm": 0.49175193905830383,
+ "learning_rate": 0.0001301830286157157,
+ "loss": 0.84,
+ "step": 1206
+ },
+ {
+ "epoch": 1.207339564252446,
+ "grad_norm": 0.5271350145339966,
+ "learning_rate": 0.0001300830763145642,
+ "loss": 0.8739,
+ "step": 1207
+ },
+ {
+ "epoch": 1.2083398455815697,
+ "grad_norm": 0.4891369342803955,
+ "learning_rate": 0.00012998309095755235,
+ "loss": 0.9923,
+ "step": 1208
+ },
+ {
+ "epoch": 1.2093401269106936,
+ "grad_norm": 0.44362354278564453,
+ "learning_rate": 0.00012988307265454597,
+ "loss": 0.911,
+ "step": 1209
+ },
+ {
+ "epoch": 1.2103404082398175,
+ "grad_norm": 0.46026211977005005,
+ "learning_rate": 0.0001297830215154471,
+ "loss": 0.8749,
+ "step": 1210
+ },
+ {
+ "epoch": 1.2113406895689414,
+ "grad_norm": 0.49236229062080383,
+ "learning_rate": 0.00012968293765019384,
+ "loss": 0.8959,
+ "step": 1211
+ },
+ {
+ "epoch": 1.212340970898065,
+ "grad_norm": 0.5326531529426575,
+ "learning_rate": 0.00012958282116876026,
+ "loss": 1.0464,
+ "step": 1212
+ },
+ {
+ "epoch": 1.213341252227189,
+ "grad_norm": 0.4658203721046448,
+ "learning_rate": 0.00012948267218115624,
+ "loss": 0.8895,
+ "step": 1213
+ },
+ {
+ "epoch": 1.2143415335563128,
+ "grad_norm": 0.5042040348052979,
+ "learning_rate": 0.00012938249079742743,
+ "loss": 0.889,
+ "step": 1214
+ },
+ {
+ "epoch": 1.2153418148854365,
+ "grad_norm": 0.5408799648284912,
+ "learning_rate": 0.00012928227712765504,
+ "loss": 0.9974,
+ "step": 1215
+ },
+ {
+ "epoch": 1.2163420962145604,
+ "grad_norm": 0.7056695818901062,
+ "learning_rate": 0.0001291820312819558,
+ "loss": 0.8644,
+ "step": 1216
+ },
+ {
+ "epoch": 1.2173423775436842,
+ "grad_norm": 0.5424172878265381,
+ "learning_rate": 0.00012908175337048174,
+ "loss": 1.0855,
+ "step": 1217
+ },
+ {
+ "epoch": 1.218342658872808,
+ "grad_norm": 0.4773527681827545,
+ "learning_rate": 0.00012898144350342015,
+ "loss": 1.014,
+ "step": 1218
+ },
+ {
+ "epoch": 1.2193429402019318,
+ "grad_norm": 0.5538880228996277,
+ "learning_rate": 0.0001288811017909934,
+ "loss": 1.0491,
+ "step": 1219
+ },
+ {
+ "epoch": 1.2203432215310557,
+ "grad_norm": 0.4497896730899811,
+ "learning_rate": 0.00012878072834345895,
+ "loss": 0.8591,
+ "step": 1220
+ },
+ {
+ "epoch": 1.2213435028601793,
+ "grad_norm": 0.5487242341041565,
+ "learning_rate": 0.00012868032327110904,
+ "loss": 0.9809,
+ "step": 1221
+ },
+ {
+ "epoch": 1.2223437841893032,
+ "grad_norm": 0.5900948643684387,
+ "learning_rate": 0.00012857988668427066,
+ "loss": 1.1435,
+ "step": 1222
+ },
+ {
+ "epoch": 1.2233440655184271,
+ "grad_norm": 0.5471523404121399,
+ "learning_rate": 0.0001284794186933055,
+ "loss": 1.0088,
+ "step": 1223
+ },
+ {
+ "epoch": 1.2243443468475508,
+ "grad_norm": 0.4625445604324341,
+ "learning_rate": 0.00012837891940860972,
+ "loss": 1.0452,
+ "step": 1224
+ },
+ {
+ "epoch": 1.2253446281766747,
+ "grad_norm": 0.4972693920135498,
+ "learning_rate": 0.00012827838894061377,
+ "loss": 1.0403,
+ "step": 1225
+ },
+ {
+ "epoch": 1.2263449095057986,
+ "grad_norm": 0.4823111295700073,
+ "learning_rate": 0.00012817782739978255,
+ "loss": 0.9439,
+ "step": 1226
+ },
+ {
+ "epoch": 1.2273451908349222,
+ "grad_norm": 0.5163894295692444,
+ "learning_rate": 0.00012807723489661495,
+ "loss": 1.031,
+ "step": 1227
+ },
+ {
+ "epoch": 1.228345472164046,
+ "grad_norm": 0.5085253119468689,
+ "learning_rate": 0.00012797661154164395,
+ "loss": 0.998,
+ "step": 1228
+ },
+ {
+ "epoch": 1.22934575349317,
+ "grad_norm": 0.4469011425971985,
+ "learning_rate": 0.00012787595744543647,
+ "loss": 0.8943,
+ "step": 1229
+ },
+ {
+ "epoch": 1.2303460348222939,
+ "grad_norm": 0.5117391347885132,
+ "learning_rate": 0.00012777527271859307,
+ "loss": 0.9817,
+ "step": 1230
+ },
+ {
+ "epoch": 1.2313463161514175,
+ "grad_norm": 0.44259950518608093,
+ "learning_rate": 0.0001276745574717481,
+ "loss": 0.7659,
+ "step": 1231
+ },
+ {
+ "epoch": 1.2323465974805414,
+ "grad_norm": 0.42978596687316895,
+ "learning_rate": 0.00012757381181556943,
+ "loss": 0.7313,
+ "step": 1232
+ },
+ {
+ "epoch": 1.2333468788096653,
+ "grad_norm": 0.5619105696678162,
+ "learning_rate": 0.0001274730358607583,
+ "loss": 0.9881,
+ "step": 1233
+ },
+ {
+ "epoch": 1.234347160138789,
+ "grad_norm": 0.5065141916275024,
+ "learning_rate": 0.00012737222971804924,
+ "loss": 0.9789,
+ "step": 1234
+ },
+ {
+ "epoch": 1.2353474414679129,
+ "grad_norm": 0.514705240726471,
+ "learning_rate": 0.00012727139349821,
+ "loss": 0.9278,
+ "step": 1235
+ },
+ {
+ "epoch": 1.2363477227970368,
+ "grad_norm": 0.48272448778152466,
+ "learning_rate": 0.0001271705273120413,
+ "loss": 0.9011,
+ "step": 1236
+ },
+ {
+ "epoch": 1.2373480041261604,
+ "grad_norm": 0.4993284344673157,
+ "learning_rate": 0.00012706963127037685,
+ "loss": 0.8341,
+ "step": 1237
+ },
+ {
+ "epoch": 1.2383482854552843,
+ "grad_norm": 0.44701850414276123,
+ "learning_rate": 0.00012696870548408316,
+ "loss": 0.8481,
+ "step": 1238
+ },
+ {
+ "epoch": 1.2393485667844082,
+ "grad_norm": 0.5611200332641602,
+ "learning_rate": 0.00012686775006405946,
+ "loss": 1.101,
+ "step": 1239
+ },
+ {
+ "epoch": 1.2403488481135319,
+ "grad_norm": 0.4962129592895508,
+ "learning_rate": 0.00012676676512123747,
+ "loss": 0.951,
+ "step": 1240
+ },
+ {
+ "epoch": 1.2413491294426557,
+ "grad_norm": 0.5547065734863281,
+ "learning_rate": 0.00012666575076658134,
+ "loss": 1.0228,
+ "step": 1241
+ },
+ {
+ "epoch": 1.2423494107717796,
+ "grad_norm": 0.5761319398880005,
+ "learning_rate": 0.00012656470711108764,
+ "loss": 1.0631,
+ "step": 1242
+ },
+ {
+ "epoch": 1.2433496921009033,
+ "grad_norm": 0.5202417969703674,
+ "learning_rate": 0.00012646363426578505,
+ "loss": 0.9623,
+ "step": 1243
+ },
+ {
+ "epoch": 1.2443499734300272,
+ "grad_norm": 0.561244547367096,
+ "learning_rate": 0.0001263625323417343,
+ "loss": 1.1666,
+ "step": 1244
+ },
+ {
+ "epoch": 1.245350254759151,
+ "grad_norm": 0.43389594554901123,
+ "learning_rate": 0.0001262614014500282,
+ "loss": 0.9473,
+ "step": 1245
+ },
+ {
+ "epoch": 1.2463505360882747,
+ "grad_norm": 0.5219054222106934,
+ "learning_rate": 0.00012616024170179126,
+ "loss": 1.0181,
+ "step": 1246
+ },
+ {
+ "epoch": 1.2473508174173986,
+ "grad_norm": 0.5179515480995178,
+ "learning_rate": 0.00012605905320817976,
+ "loss": 1.0851,
+ "step": 1247
+ },
+ {
+ "epoch": 1.2483510987465225,
+ "grad_norm": 0.5104801058769226,
+ "learning_rate": 0.00012595783608038155,
+ "loss": 0.9239,
+ "step": 1248
+ },
+ {
+ "epoch": 1.2493513800756464,
+ "grad_norm": 0.46918627619743347,
+ "learning_rate": 0.00012585659042961596,
+ "loss": 0.8361,
+ "step": 1249
+ },
+ {
+ "epoch": 1.25035166140477,
+ "grad_norm": 0.5275365710258484,
+ "learning_rate": 0.00012575531636713368,
+ "loss": 0.9256,
+ "step": 1250
+ },
+ {
+ "epoch": 1.251351942733894,
+ "grad_norm": 0.5006279349327087,
+ "learning_rate": 0.00012565401400421651,
+ "loss": 0.8748,
+ "step": 1251
+ },
+ {
+ "epoch": 1.2523522240630176,
+ "grad_norm": 0.466467022895813,
+ "learning_rate": 0.0001255526834521775,
+ "loss": 0.9217,
+ "step": 1252
+ },
+ {
+ "epoch": 1.2533525053921415,
+ "grad_norm": 0.45304587483406067,
+ "learning_rate": 0.00012545132482236055,
+ "loss": 0.8776,
+ "step": 1253
+ },
+ {
+ "epoch": 1.2543527867212654,
+ "grad_norm": 0.483394980430603,
+ "learning_rate": 0.0001253499382261405,
+ "loss": 0.9421,
+ "step": 1254
+ },
+ {
+ "epoch": 1.2553530680503893,
+ "grad_norm": 0.5117647051811218,
+ "learning_rate": 0.00012524852377492285,
+ "loss": 1.0033,
+ "step": 1255
+ },
+ {
+ "epoch": 1.256353349379513,
+ "grad_norm": 0.5712929964065552,
+ "learning_rate": 0.00012514708158014378,
+ "loss": 1.0216,
+ "step": 1256
+ },
+ {
+ "epoch": 1.2573536307086368,
+ "grad_norm": 0.49368858337402344,
+ "learning_rate": 0.00012504561175326985,
+ "loss": 0.8836,
+ "step": 1257
+ },
+ {
+ "epoch": 1.2583539120377607,
+ "grad_norm": 0.5303272008895874,
+ "learning_rate": 0.00012494411440579814,
+ "loss": 1.0138,
+ "step": 1258
+ },
+ {
+ "epoch": 1.2593541933668844,
+ "grad_norm": 0.47034743428230286,
+ "learning_rate": 0.0001248425896492558,
+ "loss": 0.9346,
+ "step": 1259
+ },
+ {
+ "epoch": 1.2603544746960083,
+ "grad_norm": 0.5398191809654236,
+ "learning_rate": 0.00012474103759520027,
+ "loss": 1.2548,
+ "step": 1260
+ },
+ {
+ "epoch": 1.2613547560251321,
+ "grad_norm": 0.4403116703033447,
+ "learning_rate": 0.00012463945835521878,
+ "loss": 0.8063,
+ "step": 1261
+ },
+ {
+ "epoch": 1.2623550373542558,
+ "grad_norm": 0.5504721999168396,
+ "learning_rate": 0.0001245378520409286,
+ "loss": 1.0888,
+ "step": 1262
+ },
+ {
+ "epoch": 1.2633553186833797,
+ "grad_norm": 0.46984589099884033,
+ "learning_rate": 0.0001244362187639767,
+ "loss": 0.9062,
+ "step": 1263
+ },
+ {
+ "epoch": 1.2643556000125036,
+ "grad_norm": 0.5573250651359558,
+ "learning_rate": 0.00012433455863603967,
+ "loss": 0.9474,
+ "step": 1264
+ },
+ {
+ "epoch": 1.2653558813416272,
+ "grad_norm": 0.5468732714653015,
+ "learning_rate": 0.00012423287176882358,
+ "loss": 0.9424,
+ "step": 1265
+ },
+ {
+ "epoch": 1.2663561626707511,
+ "grad_norm": 0.4921899437904358,
+ "learning_rate": 0.00012413115827406392,
+ "loss": 0.8568,
+ "step": 1266
+ },
+ {
+ "epoch": 1.267356443999875,
+ "grad_norm": 0.48769402503967285,
+ "learning_rate": 0.00012402941826352546,
+ "loss": 0.7579,
+ "step": 1267
+ },
+ {
+ "epoch": 1.268356725328999,
+ "grad_norm": 0.5462141633033752,
+ "learning_rate": 0.00012392765184900202,
+ "loss": 0.9946,
+ "step": 1268
+ },
+ {
+ "epoch": 1.2693570066581226,
+ "grad_norm": 0.5021050572395325,
+ "learning_rate": 0.0001238258591423165,
+ "loss": 0.8603,
+ "step": 1269
+ },
+ {
+ "epoch": 1.2703572879872465,
+ "grad_norm": 0.5272159576416016,
+ "learning_rate": 0.00012372404025532072,
+ "loss": 0.94,
+ "step": 1270
+ },
+ {
+ "epoch": 1.2713575693163701,
+ "grad_norm": 0.5332500338554382,
+ "learning_rate": 0.00012362219529989514,
+ "loss": 1.1609,
+ "step": 1271
+ },
+ {
+ "epoch": 1.272357850645494,
+ "grad_norm": 0.5058136582374573,
+ "learning_rate": 0.00012352032438794902,
+ "loss": 1.0013,
+ "step": 1272
+ },
+ {
+ "epoch": 1.273358131974618,
+ "grad_norm": 0.5055596828460693,
+ "learning_rate": 0.00012341842763142005,
+ "loss": 1.0121,
+ "step": 1273
+ },
+ {
+ "epoch": 1.2743584133037418,
+ "grad_norm": 0.5699402689933777,
+ "learning_rate": 0.00012331650514227425,
+ "loss": 1.1188,
+ "step": 1274
+ },
+ {
+ "epoch": 1.2753586946328654,
+ "grad_norm": 0.511233925819397,
+ "learning_rate": 0.00012321455703250616,
+ "loss": 1.0291,
+ "step": 1275
+ },
+ {
+ "epoch": 1.2763589759619893,
+ "grad_norm": 0.5304299592971802,
+ "learning_rate": 0.00012311258341413822,
+ "loss": 0.9619,
+ "step": 1276
+ },
+ {
+ "epoch": 1.277359257291113,
+ "grad_norm": 0.5318915247917175,
+ "learning_rate": 0.00012301058439922102,
+ "loss": 0.9669,
+ "step": 1277
+ },
+ {
+ "epoch": 1.2783595386202369,
+ "grad_norm": 0.510267436504364,
+ "learning_rate": 0.000122908560099833,
+ "loss": 1.0956,
+ "step": 1278
+ },
+ {
+ "epoch": 1.2793598199493608,
+ "grad_norm": 0.530360758304596,
+ "learning_rate": 0.00012280651062808047,
+ "loss": 1.02,
+ "step": 1279
+ },
+ {
+ "epoch": 1.2803601012784847,
+ "grad_norm": 0.5094459056854248,
+ "learning_rate": 0.00012270443609609729,
+ "loss": 0.9614,
+ "step": 1280
+ },
+ {
+ "epoch": 1.2813603826076083,
+ "grad_norm": 0.4430864453315735,
+ "learning_rate": 0.0001226023366160449,
+ "loss": 0.8188,
+ "step": 1281
+ },
+ {
+ "epoch": 1.2823606639367322,
+ "grad_norm": 0.4705411493778229,
+ "learning_rate": 0.00012250021230011225,
+ "loss": 0.8952,
+ "step": 1282
+ },
+ {
+ "epoch": 1.283360945265856,
+ "grad_norm": 0.5231715440750122,
+ "learning_rate": 0.00012239806326051539,
+ "loss": 0.941,
+ "step": 1283
+ },
+ {
+ "epoch": 1.2843612265949798,
+ "grad_norm": 0.5658493041992188,
+ "learning_rate": 0.00012229588960949771,
+ "loss": 1.0047,
+ "step": 1284
+ },
+ {
+ "epoch": 1.2853615079241036,
+ "grad_norm": 0.6016567349433899,
+ "learning_rate": 0.00012219369145932959,
+ "loss": 1.1764,
+ "step": 1285
+ },
+ {
+ "epoch": 1.2863617892532275,
+ "grad_norm": 0.6365408301353455,
+ "learning_rate": 0.00012209146892230822,
+ "loss": 0.9777,
+ "step": 1286
+ },
+ {
+ "epoch": 1.2873620705823514,
+ "grad_norm": 0.46536219120025635,
+ "learning_rate": 0.00012198922211075778,
+ "loss": 0.9826,
+ "step": 1287
+ },
+ {
+ "epoch": 1.288362351911475,
+ "grad_norm": 0.5130245089530945,
+ "learning_rate": 0.00012188695113702896,
+ "loss": 1.0255,
+ "step": 1288
+ },
+ {
+ "epoch": 1.289362633240599,
+ "grad_norm": 0.5321043133735657,
+ "learning_rate": 0.00012178465611349911,
+ "loss": 0.9973,
+ "step": 1289
+ },
+ {
+ "epoch": 1.2903629145697226,
+ "grad_norm": 0.48580724000930786,
+ "learning_rate": 0.00012168233715257194,
+ "loss": 0.8768,
+ "step": 1290
+ },
+ {
+ "epoch": 1.2913631958988465,
+ "grad_norm": 0.5140405297279358,
+ "learning_rate": 0.00012157999436667747,
+ "loss": 0.8985,
+ "step": 1291
+ },
+ {
+ "epoch": 1.2923634772279704,
+ "grad_norm": 0.4582030773162842,
+ "learning_rate": 0.00012147762786827193,
+ "loss": 0.9693,
+ "step": 1292
+ },
+ {
+ "epoch": 1.2933637585570943,
+ "grad_norm": 0.47397539019584656,
+ "learning_rate": 0.00012137523776983757,
+ "loss": 0.8348,
+ "step": 1293
+ },
+ {
+ "epoch": 1.294364039886218,
+ "grad_norm": 0.43932002782821655,
+ "learning_rate": 0.00012127282418388264,
+ "loss": 0.851,
+ "step": 1294
+ },
+ {
+ "epoch": 1.2953643212153418,
+ "grad_norm": 0.5559205412864685,
+ "learning_rate": 0.0001211703872229411,
+ "loss": 0.86,
+ "step": 1295
+ },
+ {
+ "epoch": 1.2963646025444655,
+ "grad_norm": 0.5433980226516724,
+ "learning_rate": 0.00012106792699957263,
+ "loss": 1.1181,
+ "step": 1296
+ },
+ {
+ "epoch": 1.2973648838735894,
+ "grad_norm": 0.5069502592086792,
+ "learning_rate": 0.00012096544362636255,
+ "loss": 0.9613,
+ "step": 1297
+ },
+ {
+ "epoch": 1.2983651652027133,
+ "grad_norm": 0.5588079690933228,
+ "learning_rate": 0.00012086293721592152,
+ "loss": 1.0741,
+ "step": 1298
+ },
+ {
+ "epoch": 1.2993654465318372,
+ "grad_norm": 0.6035181879997253,
+ "learning_rate": 0.00012076040788088554,
+ "loss": 1.0187,
+ "step": 1299
+ },
+ {
+ "epoch": 1.3003657278609608,
+ "grad_norm": 0.4385228455066681,
+ "learning_rate": 0.00012065785573391581,
+ "loss": 0.9293,
+ "step": 1300
+ },
+ {
+ "epoch": 1.3013660091900847,
+ "grad_norm": 0.5284578800201416,
+ "learning_rate": 0.00012055528088769861,
+ "loss": 0.9479,
+ "step": 1301
+ },
+ {
+ "epoch": 1.3023662905192086,
+ "grad_norm": 0.46655789017677307,
+ "learning_rate": 0.00012045268345494511,
+ "loss": 0.8702,
+ "step": 1302
+ },
+ {
+ "epoch": 1.3033665718483323,
+ "grad_norm": 0.5073155164718628,
+ "learning_rate": 0.00012035006354839133,
+ "loss": 0.8667,
+ "step": 1303
+ },
+ {
+ "epoch": 1.3043668531774562,
+ "grad_norm": 0.5954610109329224,
+ "learning_rate": 0.00012024742128079805,
+ "loss": 1.0998,
+ "step": 1304
+ },
+ {
+ "epoch": 1.30536713450658,
+ "grad_norm": 0.46617114543914795,
+ "learning_rate": 0.00012014475676495052,
+ "loss": 0.8853,
+ "step": 1305
+ },
+ {
+ "epoch": 1.306367415835704,
+ "grad_norm": 0.5705167055130005,
+ "learning_rate": 0.00012004207011365849,
+ "loss": 0.9094,
+ "step": 1306
+ },
+ {
+ "epoch": 1.3073676971648276,
+ "grad_norm": 0.4711546301841736,
+ "learning_rate": 0.00011993936143975599,
+ "loss": 0.9597,
+ "step": 1307
+ },
+ {
+ "epoch": 1.3083679784939515,
+ "grad_norm": 0.5322745442390442,
+ "learning_rate": 0.00011983663085610131,
+ "loss": 0.9221,
+ "step": 1308
+ },
+ {
+ "epoch": 1.3093682598230751,
+ "grad_norm": 0.4769452214241028,
+ "learning_rate": 0.00011973387847557676,
+ "loss": 0.7874,
+ "step": 1309
+ },
+ {
+ "epoch": 1.310368541152199,
+ "grad_norm": 0.5224636793136597,
+ "learning_rate": 0.00011963110441108863,
+ "loss": 0.8233,
+ "step": 1310
+ },
+ {
+ "epoch": 1.311368822481323,
+ "grad_norm": 0.5125696063041687,
+ "learning_rate": 0.000119528308775567,
+ "loss": 0.9894,
+ "step": 1311
+ },
+ {
+ "epoch": 1.3123691038104468,
+ "grad_norm": 0.5573001503944397,
+ "learning_rate": 0.00011942549168196575,
+ "loss": 0.9043,
+ "step": 1312
+ },
+ {
+ "epoch": 1.3133693851395705,
+ "grad_norm": 0.5493408441543579,
+ "learning_rate": 0.00011932265324326221,
+ "loss": 0.964,
+ "step": 1313
+ },
+ {
+ "epoch": 1.3143696664686944,
+ "grad_norm": 0.5327842235565186,
+ "learning_rate": 0.0001192197935724573,
+ "loss": 0.9196,
+ "step": 1314
+ },
+ {
+ "epoch": 1.315369947797818,
+ "grad_norm": 0.5743328332901001,
+ "learning_rate": 0.00011911691278257511,
+ "loss": 1.0504,
+ "step": 1315
+ },
+ {
+ "epoch": 1.316370229126942,
+ "grad_norm": 0.446932315826416,
+ "learning_rate": 0.0001190140109866631,
+ "loss": 0.8425,
+ "step": 1316
+ },
+ {
+ "epoch": 1.3173705104560658,
+ "grad_norm": 0.47306087613105774,
+ "learning_rate": 0.00011891108829779165,
+ "loss": 0.8726,
+ "step": 1317
+ },
+ {
+ "epoch": 1.3183707917851897,
+ "grad_norm": 0.566939115524292,
+ "learning_rate": 0.00011880814482905422,
+ "loss": 0.8747,
+ "step": 1318
+ },
+ {
+ "epoch": 1.3193710731143133,
+ "grad_norm": 0.5145870447158813,
+ "learning_rate": 0.00011870518069356709,
+ "loss": 0.9383,
+ "step": 1319
+ },
+ {
+ "epoch": 1.3203713544434372,
+ "grad_norm": 0.5228437185287476,
+ "learning_rate": 0.0001186021960044692,
+ "loss": 1.103,
+ "step": 1320
+ },
+ {
+ "epoch": 1.3213716357725611,
+ "grad_norm": 0.4844512939453125,
+ "learning_rate": 0.00011849919087492211,
+ "loss": 0.98,
+ "step": 1321
+ },
+ {
+ "epoch": 1.3223719171016848,
+ "grad_norm": 0.5099167227745056,
+ "learning_rate": 0.00011839616541810983,
+ "loss": 0.9023,
+ "step": 1322
+ },
+ {
+ "epoch": 1.3233721984308087,
+ "grad_norm": 0.4702555537223816,
+ "learning_rate": 0.00011829311974723867,
+ "loss": 0.8553,
+ "step": 1323
+ },
+ {
+ "epoch": 1.3243724797599326,
+ "grad_norm": 0.5219053030014038,
+ "learning_rate": 0.00011819005397553723,
+ "loss": 0.9446,
+ "step": 1324
+ },
+ {
+ "epoch": 1.3253727610890562,
+ "grad_norm": 0.48462843894958496,
+ "learning_rate": 0.00011808696821625613,
+ "loss": 0.9591,
+ "step": 1325
+ },
+ {
+ "epoch": 1.32637304241818,
+ "grad_norm": 0.5187227725982666,
+ "learning_rate": 0.000117983862582668,
+ "loss": 0.9413,
+ "step": 1326
+ },
+ {
+ "epoch": 1.327373323747304,
+ "grad_norm": 0.47444605827331543,
+ "learning_rate": 0.00011788073718806725,
+ "loss": 0.8979,
+ "step": 1327
+ },
+ {
+ "epoch": 1.3283736050764277,
+ "grad_norm": 0.5251137018203735,
+ "learning_rate": 0.00011777759214577006,
+ "loss": 1.0449,
+ "step": 1328
+ },
+ {
+ "epoch": 1.3293738864055515,
+ "grad_norm": 0.5007866024971008,
+ "learning_rate": 0.00011767442756911417,
+ "loss": 0.9907,
+ "step": 1329
+ },
+ {
+ "epoch": 1.3303741677346754,
+ "grad_norm": 0.8486194610595703,
+ "learning_rate": 0.00011757124357145881,
+ "loss": 1.0459,
+ "step": 1330
+ },
+ {
+ "epoch": 1.3313744490637993,
+ "grad_norm": 0.5153964161872864,
+ "learning_rate": 0.00011746804026618452,
+ "loss": 0.9911,
+ "step": 1331
+ },
+ {
+ "epoch": 1.332374730392923,
+ "grad_norm": 0.523077666759491,
+ "learning_rate": 0.00011736481776669306,
+ "loss": 1.0571,
+ "step": 1332
+ },
+ {
+ "epoch": 1.3333750117220469,
+ "grad_norm": 0.5242265462875366,
+ "learning_rate": 0.00011726157618640728,
+ "loss": 0.9057,
+ "step": 1333
+ },
+ {
+ "epoch": 1.3343752930511705,
+ "grad_norm": 0.524046778678894,
+ "learning_rate": 0.00011715831563877104,
+ "loss": 1.0413,
+ "step": 1334
+ },
+ {
+ "epoch": 1.3353755743802944,
+ "grad_norm": 0.5873232483863831,
+ "learning_rate": 0.00011705503623724898,
+ "loss": 1.1105,
+ "step": 1335
+ },
+ {
+ "epoch": 1.3363758557094183,
+ "grad_norm": 0.5559434294700623,
+ "learning_rate": 0.00011695173809532652,
+ "loss": 0.9045,
+ "step": 1336
+ },
+ {
+ "epoch": 1.3373761370385422,
+ "grad_norm": 0.5970155000686646,
+ "learning_rate": 0.00011684842132650957,
+ "loss": 1.1663,
+ "step": 1337
+ },
+ {
+ "epoch": 1.3383764183676659,
+ "grad_norm": 0.5005142092704773,
+ "learning_rate": 0.00011674508604432464,
+ "loss": 1.0695,
+ "step": 1338
+ },
+ {
+ "epoch": 1.3393766996967897,
+ "grad_norm": 0.49226582050323486,
+ "learning_rate": 0.00011664173236231848,
+ "loss": 1.0875,
+ "step": 1339
+ },
+ {
+ "epoch": 1.3403769810259134,
+ "grad_norm": 0.4792287349700928,
+ "learning_rate": 0.0001165383603940581,
+ "loss": 0.9102,
+ "step": 1340
+ },
+ {
+ "epoch": 1.3413772623550373,
+ "grad_norm": 0.4332147538661957,
+ "learning_rate": 0.00011643497025313061,
+ "loss": 0.8948,
+ "step": 1341
+ },
+ {
+ "epoch": 1.3423775436841612,
+ "grad_norm": 0.45502984523773193,
+ "learning_rate": 0.00011633156205314309,
+ "loss": 0.8538,
+ "step": 1342
+ },
+ {
+ "epoch": 1.343377825013285,
+ "grad_norm": 0.5594006776809692,
+ "learning_rate": 0.00011622813590772244,
+ "loss": 1.0178,
+ "step": 1343
+ },
+ {
+ "epoch": 1.3443781063424087,
+ "grad_norm": 0.4428876042366028,
+ "learning_rate": 0.00011612469193051525,
+ "loss": 0.856,
+ "step": 1344
+ },
+ {
+ "epoch": 1.3453783876715326,
+ "grad_norm": 0.4615425169467926,
+ "learning_rate": 0.00011602123023518779,
+ "loss": 0.8568,
+ "step": 1345
+ },
+ {
+ "epoch": 1.3463786690006565,
+ "grad_norm": 0.543389618396759,
+ "learning_rate": 0.00011591775093542572,
+ "loss": 0.8293,
+ "step": 1346
+ },
+ {
+ "epoch": 1.3473789503297802,
+ "grad_norm": 0.4740433394908905,
+ "learning_rate": 0.0001158142541449341,
+ "loss": 0.9163,
+ "step": 1347
+ },
+ {
+ "epoch": 1.348379231658904,
+ "grad_norm": 0.47938287258148193,
+ "learning_rate": 0.00011571073997743716,
+ "loss": 0.9745,
+ "step": 1348
+ },
+ {
+ "epoch": 1.349379512988028,
+ "grad_norm": 0.47510263323783875,
+ "learning_rate": 0.0001156072085466783,
+ "loss": 0.9536,
+ "step": 1349
+ },
+ {
+ "epoch": 1.3503797943171518,
+ "grad_norm": 0.5921860933303833,
+ "learning_rate": 0.00011550365996641979,
+ "loss": 0.8397,
+ "step": 1350
+ },
+ {
+ "epoch": 1.3513800756462755,
+ "grad_norm": 0.5436375737190247,
+ "learning_rate": 0.00011540009435044281,
+ "loss": 0.9381,
+ "step": 1351
+ },
+ {
+ "epoch": 1.3523803569753994,
+ "grad_norm": 0.4591434597969055,
+ "learning_rate": 0.00011529651181254723,
+ "loss": 1.0771,
+ "step": 1352
+ },
+ {
+ "epoch": 1.353380638304523,
+ "grad_norm": 0.533069372177124,
+ "learning_rate": 0.0001151929124665516,
+ "loss": 0.9103,
+ "step": 1353
+ },
+ {
+ "epoch": 1.354380919633647,
+ "grad_norm": 0.538324773311615,
+ "learning_rate": 0.00011508929642629274,
+ "loss": 1.0469,
+ "step": 1354
+ },
+ {
+ "epoch": 1.3553812009627708,
+ "grad_norm": 0.46198832988739014,
+ "learning_rate": 0.00011498566380562601,
+ "loss": 0.8242,
+ "step": 1355
+ },
+ {
+ "epoch": 1.3563814822918947,
+ "grad_norm": 0.573716402053833,
+ "learning_rate": 0.0001148820147184249,
+ "loss": 0.9437,
+ "step": 1356
+ },
+ {
+ "epoch": 1.3573817636210184,
+ "grad_norm": 0.5638802647590637,
+ "learning_rate": 0.00011477834927858104,
+ "loss": 0.9336,
+ "step": 1357
+ },
+ {
+ "epoch": 1.3583820449501423,
+ "grad_norm": 0.48780402541160583,
+ "learning_rate": 0.00011467466760000399,
+ "loss": 0.8859,
+ "step": 1358
+ },
+ {
+ "epoch": 1.359382326279266,
+ "grad_norm": 0.5441538095474243,
+ "learning_rate": 0.00011457096979662114,
+ "loss": 0.8804,
+ "step": 1359
+ },
+ {
+ "epoch": 1.3603826076083898,
+ "grad_norm": 0.5250831842422485,
+ "learning_rate": 0.00011446725598237767,
+ "loss": 0.9739,
+ "step": 1360
+ },
+ {
+ "epoch": 1.3613828889375137,
+ "grad_norm": 0.49177756905555725,
+ "learning_rate": 0.00011436352627123623,
+ "loss": 0.9586,
+ "step": 1361
+ },
+ {
+ "epoch": 1.3623831702666376,
+ "grad_norm": 0.5866628885269165,
+ "learning_rate": 0.00011425978077717709,
+ "loss": 1.0511,
+ "step": 1362
+ },
+ {
+ "epoch": 1.3633834515957612,
+ "grad_norm": 0.49350351095199585,
+ "learning_rate": 0.00011415601961419775,
+ "loss": 0.9637,
+ "step": 1363
+ },
+ {
+ "epoch": 1.3643837329248851,
+ "grad_norm": 0.5402287244796753,
+ "learning_rate": 0.00011405224289631295,
+ "loss": 1.0008,
+ "step": 1364
+ },
+ {
+ "epoch": 1.365384014254009,
+ "grad_norm": 0.5524907112121582,
+ "learning_rate": 0.00011394845073755455,
+ "loss": 1.0398,
+ "step": 1365
+ },
+ {
+ "epoch": 1.3663842955831327,
+ "grad_norm": 0.49948206543922424,
+ "learning_rate": 0.0001138446432519714,
+ "loss": 0.8577,
+ "step": 1366
+ },
+ {
+ "epoch": 1.3673845769122566,
+ "grad_norm": 0.500592052936554,
+ "learning_rate": 0.00011374082055362909,
+ "loss": 1.0053,
+ "step": 1367
+ },
+ {
+ "epoch": 1.3683848582413805,
+ "grad_norm": 0.4469926357269287,
+ "learning_rate": 0.00011363698275661001,
+ "loss": 0.8081,
+ "step": 1368
+ },
+ {
+ "epoch": 1.3693851395705043,
+ "grad_norm": 0.4939117431640625,
+ "learning_rate": 0.00011353312997501313,
+ "loss": 0.9559,
+ "step": 1369
+ },
+ {
+ "epoch": 1.370385420899628,
+ "grad_norm": 0.5091076493263245,
+ "learning_rate": 0.00011342926232295386,
+ "loss": 0.8962,
+ "step": 1370
+ },
+ {
+ "epoch": 1.371385702228752,
+ "grad_norm": 0.48055970668792725,
+ "learning_rate": 0.00011332537991456398,
+ "loss": 0.8686,
+ "step": 1371
+ },
+ {
+ "epoch": 1.3723859835578756,
+ "grad_norm": 0.4724258482456207,
+ "learning_rate": 0.00011322148286399147,
+ "loss": 0.8872,
+ "step": 1372
+ },
+ {
+ "epoch": 1.3733862648869994,
+ "grad_norm": 0.4945514500141144,
+ "learning_rate": 0.0001131175712854004,
+ "loss": 0.8766,
+ "step": 1373
+ },
+ {
+ "epoch": 1.3743865462161233,
+ "grad_norm": 0.4784204065799713,
+ "learning_rate": 0.00011301364529297079,
+ "loss": 0.8216,
+ "step": 1374
+ },
+ {
+ "epoch": 1.3753868275452472,
+ "grad_norm": 0.4669654667377472,
+ "learning_rate": 0.0001129097050008985,
+ "loss": 0.98,
+ "step": 1375
+ },
+ {
+ "epoch": 1.3763871088743709,
+ "grad_norm": 0.5275737047195435,
+ "learning_rate": 0.00011280575052339514,
+ "loss": 0.9391,
+ "step": 1376
+ },
+ {
+ "epoch": 1.3773873902034948,
+ "grad_norm": 0.47577112913131714,
+ "learning_rate": 0.00011270178197468789,
+ "loss": 0.8956,
+ "step": 1377
+ },
+ {
+ "epoch": 1.3783876715326184,
+ "grad_norm": 0.49086448550224304,
+ "learning_rate": 0.00011259779946901934,
+ "loss": 1.0058,
+ "step": 1378
+ },
+ {
+ "epoch": 1.3793879528617423,
+ "grad_norm": 0.5351247191429138,
+ "learning_rate": 0.0001124938031206475,
+ "loss": 1.0215,
+ "step": 1379
+ },
+ {
+ "epoch": 1.3803882341908662,
+ "grad_norm": 0.5512630343437195,
+ "learning_rate": 0.00011238979304384554,
+ "loss": 1.0254,
+ "step": 1380
+ },
+ {
+ "epoch": 1.38138851551999,
+ "grad_norm": 0.5598354339599609,
+ "learning_rate": 0.0001122857693529017,
+ "loss": 0.8707,
+ "step": 1381
+ },
+ {
+ "epoch": 1.3823887968491138,
+ "grad_norm": 0.5506719946861267,
+ "learning_rate": 0.0001121817321621192,
+ "loss": 0.9061,
+ "step": 1382
+ },
+ {
+ "epoch": 1.3833890781782376,
+ "grad_norm": 0.5244742035865784,
+ "learning_rate": 0.00011207768158581613,
+ "loss": 1.0017,
+ "step": 1383
+ },
+ {
+ "epoch": 1.3843893595073615,
+ "grad_norm": 0.480194091796875,
+ "learning_rate": 0.00011197361773832525,
+ "loss": 0.8132,
+ "step": 1384
+ },
+ {
+ "epoch": 1.3853896408364852,
+ "grad_norm": 0.5409587025642395,
+ "learning_rate": 0.00011186954073399387,
+ "loss": 1.0724,
+ "step": 1385
+ },
+ {
+ "epoch": 1.386389922165609,
+ "grad_norm": 0.5776751041412354,
+ "learning_rate": 0.00011176545068718385,
+ "loss": 0.9577,
+ "step": 1386
+ },
+ {
+ "epoch": 1.387390203494733,
+ "grad_norm": 0.4478171765804291,
+ "learning_rate": 0.0001116613477122713,
+ "loss": 0.7698,
+ "step": 1387
+ },
+ {
+ "epoch": 1.3883904848238566,
+ "grad_norm": 0.5580281615257263,
+ "learning_rate": 0.00011155723192364658,
+ "loss": 1.0065,
+ "step": 1388
+ },
+ {
+ "epoch": 1.3893907661529805,
+ "grad_norm": 0.5318020582199097,
+ "learning_rate": 0.00011145310343571411,
+ "loss": 0.9155,
+ "step": 1389
+ },
+ {
+ "epoch": 1.3903910474821044,
+ "grad_norm": 0.45960649847984314,
+ "learning_rate": 0.00011134896236289224,
+ "loss": 0.848,
+ "step": 1390
+ },
+ {
+ "epoch": 1.391391328811228,
+ "grad_norm": 0.49986693263053894,
+ "learning_rate": 0.0001112448088196132,
+ "loss": 1.0222,
+ "step": 1391
+ },
+ {
+ "epoch": 1.392391610140352,
+ "grad_norm": 0.6470636129379272,
+ "learning_rate": 0.00011114064292032282,
+ "loss": 0.8976,
+ "step": 1392
+ },
+ {
+ "epoch": 1.3933918914694758,
+ "grad_norm": 0.49885210394859314,
+ "learning_rate": 0.0001110364647794807,
+ "loss": 0.8872,
+ "step": 1393
+ },
+ {
+ "epoch": 1.3943921727985997,
+ "grad_norm": 0.48183003067970276,
+ "learning_rate": 0.00011093227451155974,
+ "loss": 0.7506,
+ "step": 1394
+ },
+ {
+ "epoch": 1.3953924541277234,
+ "grad_norm": 0.47776031494140625,
+ "learning_rate": 0.0001108280722310462,
+ "loss": 0.9945,
+ "step": 1395
+ },
+ {
+ "epoch": 1.3963927354568473,
+ "grad_norm": 0.5032552480697632,
+ "learning_rate": 0.0001107238580524395,
+ "loss": 0.9844,
+ "step": 1396
+ },
+ {
+ "epoch": 1.397393016785971,
+ "grad_norm": 0.5641827583312988,
+ "learning_rate": 0.00011061963209025223,
+ "loss": 0.9862,
+ "step": 1397
+ },
+ {
+ "epoch": 1.3983932981150948,
+ "grad_norm": 0.45950955152511597,
+ "learning_rate": 0.00011051539445900983,
+ "loss": 0.9878,
+ "step": 1398
+ },
+ {
+ "epoch": 1.3993935794442187,
+ "grad_norm": 0.48625022172927856,
+ "learning_rate": 0.00011041114527325065,
+ "loss": 0.9446,
+ "step": 1399
+ },
+ {
+ "epoch": 1.4003938607733426,
+ "grad_norm": 0.5851911902427673,
+ "learning_rate": 0.00011030688464752566,
+ "loss": 1.1538,
+ "step": 1400
+ },
+ {
+ "epoch": 1.4013941421024663,
+ "grad_norm": 0.45012837648391724,
+ "learning_rate": 0.00011020261269639842,
+ "loss": 0.8871,
+ "step": 1401
+ },
+ {
+ "epoch": 1.4023944234315902,
+ "grad_norm": 0.4794975221157074,
+ "learning_rate": 0.000110098329534445,
+ "loss": 0.912,
+ "step": 1402
+ },
+ {
+ "epoch": 1.4033947047607138,
+ "grad_norm": 0.5397909879684448,
+ "learning_rate": 0.00010999403527625367,
+ "loss": 1.015,
+ "step": 1403
+ },
+ {
+ "epoch": 1.4043949860898377,
+ "grad_norm": 0.5413039922714233,
+ "learning_rate": 0.00010988973003642499,
+ "loss": 1.0111,
+ "step": 1404
+ },
+ {
+ "epoch": 1.4053952674189616,
+ "grad_norm": 0.48752084374427795,
+ "learning_rate": 0.00010978541392957156,
+ "loss": 0.8649,
+ "step": 1405
+ },
+ {
+ "epoch": 1.4063955487480855,
+ "grad_norm": 0.5576539635658264,
+ "learning_rate": 0.00010968108707031792,
+ "loss": 0.8334,
+ "step": 1406
+ },
+ {
+ "epoch": 1.4073958300772091,
+ "grad_norm": 0.5292769074440002,
+ "learning_rate": 0.00010957674957330042,
+ "loss": 1.0312,
+ "step": 1407
+ },
+ {
+ "epoch": 1.408396111406333,
+ "grad_norm": 0.5971432328224182,
+ "learning_rate": 0.00010947240155316707,
+ "loss": 0.9367,
+ "step": 1408
+ },
+ {
+ "epoch": 1.409396392735457,
+ "grad_norm": 0.5620018839836121,
+ "learning_rate": 0.00010936804312457749,
+ "loss": 0.9493,
+ "step": 1409
+ },
+ {
+ "epoch": 1.4103966740645806,
+ "grad_norm": 0.456496000289917,
+ "learning_rate": 0.00010926367440220276,
+ "loss": 0.8532,
+ "step": 1410
+ },
+ {
+ "epoch": 1.4113969553937045,
+ "grad_norm": 0.47393882274627686,
+ "learning_rate": 0.00010915929550072517,
+ "loss": 0.8073,
+ "step": 1411
+ },
+ {
+ "epoch": 1.4123972367228284,
+ "grad_norm": 0.5321446061134338,
+ "learning_rate": 0.00010905490653483827,
+ "loss": 1.1076,
+ "step": 1412
+ },
+ {
+ "epoch": 1.4133975180519522,
+ "grad_norm": 0.4768468141555786,
+ "learning_rate": 0.00010895050761924668,
+ "loss": 0.9466,
+ "step": 1413
+ },
+ {
+ "epoch": 1.414397799381076,
+ "grad_norm": 0.5629300475120544,
+ "learning_rate": 0.00010884609886866588,
+ "loss": 1.0541,
+ "step": 1414
+ },
+ {
+ "epoch": 1.4153980807101998,
+ "grad_norm": 0.45907631516456604,
+ "learning_rate": 0.00010874168039782227,
+ "loss": 0.9156,
+ "step": 1415
+ },
+ {
+ "epoch": 1.4163983620393235,
+ "grad_norm": 0.5152727961540222,
+ "learning_rate": 0.00010863725232145286,
+ "loss": 1.0495,
+ "step": 1416
+ },
+ {
+ "epoch": 1.4173986433684473,
+ "grad_norm": 0.511647641658783,
+ "learning_rate": 0.00010853281475430517,
+ "loss": 0.7327,
+ "step": 1417
+ },
+ {
+ "epoch": 1.4183989246975712,
+ "grad_norm": 0.6430179476737976,
+ "learning_rate": 0.0001084283678111372,
+ "loss": 0.9831,
+ "step": 1418
+ },
+ {
+ "epoch": 1.4193992060266951,
+ "grad_norm": 0.5592547059059143,
+ "learning_rate": 0.00010832391160671729,
+ "loss": 0.9462,
+ "step": 1419
+ },
+ {
+ "epoch": 1.4203994873558188,
+ "grad_norm": 0.5079266428947449,
+ "learning_rate": 0.00010821944625582392,
+ "loss": 1.0473,
+ "step": 1420
+ },
+ {
+ "epoch": 1.4213997686849427,
+ "grad_norm": 0.5006073713302612,
+ "learning_rate": 0.00010811497187324555,
+ "loss": 0.8077,
+ "step": 1421
+ },
+ {
+ "epoch": 1.4224000500140663,
+ "grad_norm": 0.47260841727256775,
+ "learning_rate": 0.00010801048857378071,
+ "loss": 0.8069,
+ "step": 1422
+ },
+ {
+ "epoch": 1.4234003313431902,
+ "grad_norm": 0.5051037669181824,
+ "learning_rate": 0.00010790599647223763,
+ "loss": 1.0241,
+ "step": 1423
+ },
+ {
+ "epoch": 1.424400612672314,
+ "grad_norm": 0.5116690397262573,
+ "learning_rate": 0.0001078014956834342,
+ "loss": 1.0377,
+ "step": 1424
+ },
+ {
+ "epoch": 1.425400894001438,
+ "grad_norm": 0.48974907398223877,
+ "learning_rate": 0.00010769698632219794,
+ "loss": 1.0578,
+ "step": 1425
+ },
+ {
+ "epoch": 1.4264011753305617,
+ "grad_norm": 0.5071999430656433,
+ "learning_rate": 0.00010759246850336572,
+ "loss": 0.9072,
+ "step": 1426
+ },
+ {
+ "epoch": 1.4274014566596855,
+ "grad_norm": 0.6418463587760925,
+ "learning_rate": 0.0001074879423417837,
+ "loss": 1.1195,
+ "step": 1427
+ },
+ {
+ "epoch": 1.4284017379888094,
+ "grad_norm": 0.4854032099246979,
+ "learning_rate": 0.00010738340795230721,
+ "loss": 1.0776,
+ "step": 1428
+ },
+ {
+ "epoch": 1.429402019317933,
+ "grad_norm": 0.5330777764320374,
+ "learning_rate": 0.00010727886544980068,
+ "loss": 1.0851,
+ "step": 1429
+ },
+ {
+ "epoch": 1.430402300647057,
+ "grad_norm": 0.5281643271446228,
+ "learning_rate": 0.00010717431494913741,
+ "loss": 0.8663,
+ "step": 1430
+ },
+ {
+ "epoch": 1.4314025819761809,
+ "grad_norm": 0.47898662090301514,
+ "learning_rate": 0.00010706975656519946,
+ "loss": 0.9926,
+ "step": 1431
+ },
+ {
+ "epoch": 1.4324028633053048,
+ "grad_norm": 0.43927934765815735,
+ "learning_rate": 0.00010696519041287765,
+ "loss": 0.8698,
+ "step": 1432
+ },
+ {
+ "epoch": 1.4334031446344284,
+ "grad_norm": 0.5207253694534302,
+ "learning_rate": 0.0001068606166070712,
+ "loss": 0.9795,
+ "step": 1433
+ },
+ {
+ "epoch": 1.4344034259635523,
+ "grad_norm": 0.5264511704444885,
+ "learning_rate": 0.00010675603526268785,
+ "loss": 0.9593,
+ "step": 1434
+ },
+ {
+ "epoch": 1.435403707292676,
+ "grad_norm": 0.5435792803764343,
+ "learning_rate": 0.00010665144649464356,
+ "loss": 0.9436,
+ "step": 1435
+ },
+ {
+ "epoch": 1.4364039886217999,
+ "grad_norm": 0.5383104681968689,
+ "learning_rate": 0.00010654685041786249,
+ "loss": 0.9569,
+ "step": 1436
+ },
+ {
+ "epoch": 1.4374042699509237,
+ "grad_norm": 0.48762592673301697,
+ "learning_rate": 0.00010644224714727681,
+ "loss": 0.9235,
+ "step": 1437
+ },
+ {
+ "epoch": 1.4384045512800476,
+ "grad_norm": 0.4815019965171814,
+ "learning_rate": 0.0001063376367978266,
+ "loss": 0.8241,
+ "step": 1438
+ },
+ {
+ "epoch": 1.4394048326091713,
+ "grad_norm": 0.4944337010383606,
+ "learning_rate": 0.00010623301948445971,
+ "loss": 0.9169,
+ "step": 1439
+ },
+ {
+ "epoch": 1.4404051139382952,
+ "grad_norm": 0.5658552646636963,
+ "learning_rate": 0.00010612839532213164,
+ "loss": 1.044,
+ "step": 1440
+ },
+ {
+ "epoch": 1.4414053952674188,
+ "grad_norm": 0.5688045620918274,
+ "learning_rate": 0.00010602376442580544,
+ "loss": 0.9684,
+ "step": 1441
+ },
+ {
+ "epoch": 1.4424056765965427,
+ "grad_norm": 0.5434709787368774,
+ "learning_rate": 0.00010591912691045152,
+ "loss": 0.8741,
+ "step": 1442
+ },
+ {
+ "epoch": 1.4434059579256666,
+ "grad_norm": 0.583562433719635,
+ "learning_rate": 0.00010581448289104758,
+ "loss": 1.1651,
+ "step": 1443
+ },
+ {
+ "epoch": 1.4444062392547905,
+ "grad_norm": 0.566363513469696,
+ "learning_rate": 0.00010570983248257853,
+ "loss": 1.0091,
+ "step": 1444
+ },
+ {
+ "epoch": 1.4454065205839142,
+ "grad_norm": 0.527039647102356,
+ "learning_rate": 0.00010560517580003617,
+ "loss": 1.0666,
+ "step": 1445
+ },
+ {
+ "epoch": 1.446406801913038,
+ "grad_norm": 0.46389803290367126,
+ "learning_rate": 0.00010550051295841931,
+ "loss": 0.9344,
+ "step": 1446
+ },
+ {
+ "epoch": 1.447407083242162,
+ "grad_norm": 0.6291074752807617,
+ "learning_rate": 0.00010539584407273349,
+ "loss": 1.0388,
+ "step": 1447
+ },
+ {
+ "epoch": 1.4484073645712856,
+ "grad_norm": 0.5249356031417847,
+ "learning_rate": 0.00010529116925799085,
+ "loss": 0.97,
+ "step": 1448
+ },
+ {
+ "epoch": 1.4494076459004095,
+ "grad_norm": 0.4662008583545685,
+ "learning_rate": 0.00010518648862921012,
+ "loss": 0.8385,
+ "step": 1449
+ },
+ {
+ "epoch": 1.4504079272295334,
+ "grad_norm": 0.5730600953102112,
+ "learning_rate": 0.00010508180230141635,
+ "loss": 0.8747,
+ "step": 1450
+ },
+ {
+ "epoch": 1.451408208558657,
+ "grad_norm": 0.48082512617111206,
+ "learning_rate": 0.00010497711038964086,
+ "loss": 0.8624,
+ "step": 1451
+ },
+ {
+ "epoch": 1.452408489887781,
+ "grad_norm": 0.48900333046913147,
+ "learning_rate": 0.0001048724130089212,
+ "loss": 0.7826,
+ "step": 1452
+ },
+ {
+ "epoch": 1.4534087712169048,
+ "grad_norm": 0.4998112618923187,
+ "learning_rate": 0.00010476771027430086,
+ "loss": 0.8687,
+ "step": 1453
+ },
+ {
+ "epoch": 1.4544090525460285,
+ "grad_norm": 0.4872112572193146,
+ "learning_rate": 0.00010466300230082911,
+ "loss": 0.9185,
+ "step": 1454
+ },
+ {
+ "epoch": 1.4554093338751524,
+ "grad_norm": 0.5405575633049011,
+ "learning_rate": 0.00010455828920356115,
+ "loss": 0.9601,
+ "step": 1455
+ },
+ {
+ "epoch": 1.4564096152042763,
+ "grad_norm": 0.4496804475784302,
+ "learning_rate": 0.00010445357109755771,
+ "loss": 0.8606,
+ "step": 1456
+ },
+ {
+ "epoch": 1.4574098965334001,
+ "grad_norm": 0.49340635538101196,
+ "learning_rate": 0.00010434884809788508,
+ "loss": 1.1009,
+ "step": 1457
+ },
+ {
+ "epoch": 1.4584101778625238,
+ "grad_norm": 0.4692990481853485,
+ "learning_rate": 0.00010424412031961484,
+ "loss": 0.8011,
+ "step": 1458
+ },
+ {
+ "epoch": 1.4594104591916477,
+ "grad_norm": 0.5027800798416138,
+ "learning_rate": 0.00010413938787782394,
+ "loss": 0.8827,
+ "step": 1459
+ },
+ {
+ "epoch": 1.4604107405207714,
+ "grad_norm": 0.6764587163925171,
+ "learning_rate": 0.00010403465088759437,
+ "loss": 0.8513,
+ "step": 1460
+ },
+ {
+ "epoch": 1.4614110218498952,
+ "grad_norm": 0.558620035648346,
+ "learning_rate": 0.00010392990946401313,
+ "loss": 0.9881,
+ "step": 1461
+ },
+ {
+ "epoch": 1.4624113031790191,
+ "grad_norm": 0.603817343711853,
+ "learning_rate": 0.00010382516372217215,
+ "loss": 0.9869,
+ "step": 1462
+ },
+ {
+ "epoch": 1.463411584508143,
+ "grad_norm": 0.4486953020095825,
+ "learning_rate": 0.000103720413777168,
+ "loss": 0.8933,
+ "step": 1463
+ },
+ {
+ "epoch": 1.4644118658372667,
+ "grad_norm": 0.5756564736366272,
+ "learning_rate": 0.00010361565974410192,
+ "loss": 0.9974,
+ "step": 1464
+ },
+ {
+ "epoch": 1.4654121471663906,
+ "grad_norm": 0.4386444389820099,
+ "learning_rate": 0.00010351090173807969,
+ "loss": 0.8577,
+ "step": 1465
+ },
+ {
+ "epoch": 1.4664124284955142,
+ "grad_norm": 0.5308933258056641,
+ "learning_rate": 0.00010340613987421137,
+ "loss": 1.0539,
+ "step": 1466
+ },
+ {
+ "epoch": 1.4674127098246381,
+ "grad_norm": 0.6070798635482788,
+ "learning_rate": 0.00010330137426761135,
+ "loss": 0.9111,
+ "step": 1467
+ },
+ {
+ "epoch": 1.468412991153762,
+ "grad_norm": 0.5870214700698853,
+ "learning_rate": 0.00010319660503339808,
+ "loss": 0.9958,
+ "step": 1468
+ },
+ {
+ "epoch": 1.469413272482886,
+ "grad_norm": 0.5014438629150391,
+ "learning_rate": 0.00010309183228669397,
+ "loss": 0.987,
+ "step": 1469
+ },
+ {
+ "epoch": 1.4704135538120096,
+ "grad_norm": 0.47051525115966797,
+ "learning_rate": 0.00010298705614262532,
+ "loss": 1.0899,
+ "step": 1470
+ },
+ {
+ "epoch": 1.4714138351411334,
+ "grad_norm": 0.5500984787940979,
+ "learning_rate": 0.0001028822767163222,
+ "loss": 0.8882,
+ "step": 1471
+ },
+ {
+ "epoch": 1.4724141164702573,
+ "grad_norm": 0.4973205626010895,
+ "learning_rate": 0.00010277749412291824,
+ "loss": 0.9374,
+ "step": 1472
+ },
+ {
+ "epoch": 1.473414397799381,
+ "grad_norm": 0.4927331209182739,
+ "learning_rate": 0.00010267270847755048,
+ "loss": 0.9608,
+ "step": 1473
+ },
+ {
+ "epoch": 1.4744146791285049,
+ "grad_norm": 0.5539640188217163,
+ "learning_rate": 0.00010256791989535952,
+ "loss": 0.9339,
+ "step": 1474
+ },
+ {
+ "epoch": 1.4754149604576288,
+ "grad_norm": 0.48375800251960754,
+ "learning_rate": 0.00010246312849148899,
+ "loss": 0.8778,
+ "step": 1475
+ },
+ {
+ "epoch": 1.4764152417867527,
+ "grad_norm": 0.522544264793396,
+ "learning_rate": 0.00010235833438108571,
+ "loss": 0.9633,
+ "step": 1476
+ },
+ {
+ "epoch": 1.4774155231158763,
+ "grad_norm": 0.5747688412666321,
+ "learning_rate": 0.00010225353767929944,
+ "loss": 1.0206,
+ "step": 1477
+ },
+ {
+ "epoch": 1.4784158044450002,
+ "grad_norm": 0.4539598226547241,
+ "learning_rate": 0.00010214873850128282,
+ "loss": 0.7895,
+ "step": 1478
+ },
+ {
+ "epoch": 1.4794160857741239,
+ "grad_norm": 0.4290696978569031,
+ "learning_rate": 0.00010204393696219117,
+ "loss": 0.8718,
+ "step": 1479
+ },
+ {
+ "epoch": 1.4804163671032478,
+ "grad_norm": 0.43560928106307983,
+ "learning_rate": 0.00010193913317718244,
+ "loss": 0.8839,
+ "step": 1480
+ },
+ {
+ "epoch": 1.4814166484323716,
+ "grad_norm": 0.4937680661678314,
+ "learning_rate": 0.00010183432726141706,
+ "loss": 0.9615,
+ "step": 1481
+ },
+ {
+ "epoch": 1.4824169297614955,
+ "grad_norm": 0.5631589889526367,
+ "learning_rate": 0.00010172951933005775,
+ "loss": 1.0691,
+ "step": 1482
+ },
+ {
+ "epoch": 1.4834172110906192,
+ "grad_norm": 0.5049973726272583,
+ "learning_rate": 0.00010162470949826948,
+ "loss": 0.9107,
+ "step": 1483
+ },
+ {
+ "epoch": 1.484417492419743,
+ "grad_norm": 0.5362145304679871,
+ "learning_rate": 0.0001015198978812193,
+ "loss": 0.9762,
+ "step": 1484
+ },
+ {
+ "epoch": 1.4854177737488667,
+ "grad_norm": 0.4824192225933075,
+ "learning_rate": 0.00010141508459407623,
+ "loss": 0.8844,
+ "step": 1485
+ },
+ {
+ "epoch": 1.4864180550779906,
+ "grad_norm": 0.5116665959358215,
+ "learning_rate": 0.0001013102697520111,
+ "loss": 0.9461,
+ "step": 1486
+ },
+ {
+ "epoch": 1.4874183364071145,
+ "grad_norm": 0.5244630575180054,
+ "learning_rate": 0.00010120545347019647,
+ "loss": 1.0286,
+ "step": 1487
+ },
+ {
+ "epoch": 1.4884186177362384,
+ "grad_norm": 0.5252584218978882,
+ "learning_rate": 0.00010110063586380646,
+ "loss": 1.1083,
+ "step": 1488
+ },
+ {
+ "epoch": 1.489418899065362,
+ "grad_norm": 0.4909230172634125,
+ "learning_rate": 0.00010099581704801673,
+ "loss": 0.9338,
+ "step": 1489
+ },
+ {
+ "epoch": 1.490419180394486,
+ "grad_norm": 0.5618056654930115,
+ "learning_rate": 0.00010089099713800414,
+ "loss": 1.0513,
+ "step": 1490
+ },
+ {
+ "epoch": 1.4914194617236098,
+ "grad_norm": 0.48737892508506775,
+ "learning_rate": 0.00010078617624894684,
+ "loss": 0.8669,
+ "step": 1491
+ },
+ {
+ "epoch": 1.4924197430527335,
+ "grad_norm": 0.411451131105423,
+ "learning_rate": 0.000100681354496024,
+ "loss": 0.881,
+ "step": 1492
+ },
+ {
+ "epoch": 1.4934200243818574,
+ "grad_norm": 0.5821709632873535,
+ "learning_rate": 0.00010057653199441581,
+ "loss": 0.9359,
+ "step": 1493
+ },
+ {
+ "epoch": 1.4944203057109813,
+ "grad_norm": 0.4621860086917877,
+ "learning_rate": 0.00010047170885930324,
+ "loss": 0.8121,
+ "step": 1494
+ },
+ {
+ "epoch": 1.4954205870401052,
+ "grad_norm": 0.4658668339252472,
+ "learning_rate": 0.00010036688520586788,
+ "loss": 0.9806,
+ "step": 1495
+ },
+ {
+ "epoch": 1.4964208683692288,
+ "grad_norm": 0.49816030263900757,
+ "learning_rate": 0.00010026206114929209,
+ "loss": 0.9124,
+ "step": 1496
+ },
+ {
+ "epoch": 1.4974211496983527,
+ "grad_norm": 0.5228123068809509,
+ "learning_rate": 0.00010015723680475846,
+ "loss": 1.0132,
+ "step": 1497
+ },
+ {
+ "epoch": 1.4984214310274764,
+ "grad_norm": 0.4727514982223511,
+ "learning_rate": 0.00010005241228745004,
+ "loss": 0.8418,
+ "step": 1498
+ },
+ {
+ "epoch": 1.4994217123566003,
+ "grad_norm": 0.528904914855957,
+ "learning_rate": 9.994758771254997e-05,
+ "loss": 0.9702,
+ "step": 1499
+ },
+ {
+ "epoch": 1.5004219936857242,
+ "grad_norm": 0.5090524554252625,
+ "learning_rate": 9.984276319524154e-05,
+ "loss": 0.9927,
+ "step": 1500
+ },
+ {
+ "epoch": 1.501422275014848,
+ "grad_norm": 0.4553126096725464,
+ "learning_rate": 9.973793885070792e-05,
+ "loss": 0.9075,
+ "step": 1501
+ },
+ {
+ "epoch": 1.5024225563439717,
+ "grad_norm": 0.4887089133262634,
+ "learning_rate": 9.963311479413211e-05,
+ "loss": 0.9999,
+ "step": 1502
+ },
+ {
+ "epoch": 1.5034228376730956,
+ "grad_norm": 0.48520341515541077,
+ "learning_rate": 9.95282911406968e-05,
+ "loss": 1.0182,
+ "step": 1503
+ },
+ {
+ "epoch": 1.5044231190022193,
+ "grad_norm": 0.5554280877113342,
+ "learning_rate": 9.942346800558421e-05,
+ "loss": 0.9456,
+ "step": 1504
+ },
+ {
+ "epoch": 1.5054234003313431,
+ "grad_norm": 0.5199026465415955,
+ "learning_rate": 9.931864550397601e-05,
+ "loss": 1.0141,
+ "step": 1505
+ },
+ {
+ "epoch": 1.506423681660467,
+ "grad_norm": 0.5191763043403625,
+ "learning_rate": 9.921382375105318e-05,
+ "loss": 0.937,
+ "step": 1506
+ },
+ {
+ "epoch": 1.507423962989591,
+ "grad_norm": 0.5416325330734253,
+ "learning_rate": 9.910900286199587e-05,
+ "loss": 1.07,
+ "step": 1507
+ },
+ {
+ "epoch": 1.5084242443187148,
+ "grad_norm": 0.5193303227424622,
+ "learning_rate": 9.900418295198328e-05,
+ "loss": 0.9386,
+ "step": 1508
+ },
+ {
+ "epoch": 1.5094245256478385,
+ "grad_norm": 0.5433129072189331,
+ "learning_rate": 9.889936413619356e-05,
+ "loss": 0.8967,
+ "step": 1509
+ },
+ {
+ "epoch": 1.5104248069769621,
+ "grad_norm": 0.526980459690094,
+ "learning_rate": 9.879454652980358e-05,
+ "loss": 1.1135,
+ "step": 1510
+ },
+ {
+ "epoch": 1.511425088306086,
+ "grad_norm": 0.4468344449996948,
+ "learning_rate": 9.868973024798895e-05,
+ "loss": 0.9408,
+ "step": 1511
+ },
+ {
+ "epoch": 1.51242536963521,
+ "grad_norm": 0.5974569320678711,
+ "learning_rate": 9.858491540592382e-05,
+ "loss": 0.9747,
+ "step": 1512
+ },
+ {
+ "epoch": 1.5134256509643338,
+ "grad_norm": 0.5186171531677246,
+ "learning_rate": 9.848010211878074e-05,
+ "loss": 1.1012,
+ "step": 1513
+ },
+ {
+ "epoch": 1.5144259322934577,
+ "grad_norm": 0.5307335257530212,
+ "learning_rate": 9.837529050173052e-05,
+ "loss": 0.9548,
+ "step": 1514
+ },
+ {
+ "epoch": 1.5154262136225813,
+ "grad_norm": 0.469865083694458,
+ "learning_rate": 9.827048066994225e-05,
+ "loss": 0.8556,
+ "step": 1515
+ },
+ {
+ "epoch": 1.516426494951705,
+ "grad_norm": 0.4164840877056122,
+ "learning_rate": 9.816567273858296e-05,
+ "loss": 0.7429,
+ "step": 1516
+ },
+ {
+ "epoch": 1.517426776280829,
+ "grad_norm": 0.5811400413513184,
+ "learning_rate": 9.806086682281758e-05,
+ "loss": 1.066,
+ "step": 1517
+ },
+ {
+ "epoch": 1.5184270576099528,
+ "grad_norm": 0.4634648263454437,
+ "learning_rate": 9.795606303780885e-05,
+ "loss": 1.0048,
+ "step": 1518
+ },
+ {
+ "epoch": 1.5194273389390767,
+ "grad_norm": 0.45642492175102234,
+ "learning_rate": 9.785126149871722e-05,
+ "loss": 0.8776,
+ "step": 1519
+ },
+ {
+ "epoch": 1.5204276202682006,
+ "grad_norm": 0.5217366218566895,
+ "learning_rate": 9.77464623207006e-05,
+ "loss": 0.9806,
+ "step": 1520
+ },
+ {
+ "epoch": 1.5214279015973242,
+ "grad_norm": 0.4867999851703644,
+ "learning_rate": 9.764166561891432e-05,
+ "loss": 0.9539,
+ "step": 1521
+ },
+ {
+ "epoch": 1.522428182926448,
+ "grad_norm": 0.5579104423522949,
+ "learning_rate": 9.753687150851102e-05,
+ "loss": 1.0812,
+ "step": 1522
+ },
+ {
+ "epoch": 1.5234284642555718,
+ "grad_norm": 0.5152975916862488,
+ "learning_rate": 9.74320801046405e-05,
+ "loss": 0.8958,
+ "step": 1523
+ },
+ {
+ "epoch": 1.5244287455846957,
+ "grad_norm": 0.5229570269584656,
+ "learning_rate": 9.732729152244953e-05,
+ "loss": 1.1053,
+ "step": 1524
+ },
+ {
+ "epoch": 1.5254290269138195,
+ "grad_norm": 0.49501264095306396,
+ "learning_rate": 9.722250587708181e-05,
+ "loss": 0.8045,
+ "step": 1525
+ },
+ {
+ "epoch": 1.5264293082429434,
+ "grad_norm": 0.5376133918762207,
+ "learning_rate": 9.711772328367784e-05,
+ "loss": 1.0366,
+ "step": 1526
+ },
+ {
+ "epoch": 1.527429589572067,
+ "grad_norm": 0.5039237141609192,
+ "learning_rate": 9.70129438573747e-05,
+ "loss": 0.9531,
+ "step": 1527
+ },
+ {
+ "epoch": 1.528429870901191,
+ "grad_norm": 0.483420729637146,
+ "learning_rate": 9.690816771330608e-05,
+ "loss": 0.8635,
+ "step": 1528
+ },
+ {
+ "epoch": 1.5294301522303146,
+ "grad_norm": 0.5216282606124878,
+ "learning_rate": 9.680339496660192e-05,
+ "loss": 0.8885,
+ "step": 1529
+ },
+ {
+ "epoch": 1.5304304335594385,
+ "grad_norm": 0.4887123703956604,
+ "learning_rate": 9.669862573238863e-05,
+ "loss": 1.01,
+ "step": 1530
+ },
+ {
+ "epoch": 1.5314307148885624,
+ "grad_norm": 0.5213040113449097,
+ "learning_rate": 9.659386012578863e-05,
+ "loss": 0.8264,
+ "step": 1531
+ },
+ {
+ "epoch": 1.5324309962176863,
+ "grad_norm": 0.45882460474967957,
+ "learning_rate": 9.648909826192033e-05,
+ "loss": 0.9247,
+ "step": 1532
+ },
+ {
+ "epoch": 1.5334312775468102,
+ "grad_norm": 0.4360674023628235,
+ "learning_rate": 9.63843402558981e-05,
+ "loss": 0.9197,
+ "step": 1533
+ },
+ {
+ "epoch": 1.5344315588759339,
+ "grad_norm": 0.5070340633392334,
+ "learning_rate": 9.627958622283203e-05,
+ "loss": 0.9523,
+ "step": 1534
+ },
+ {
+ "epoch": 1.5354318402050575,
+ "grad_norm": 0.5255693197250366,
+ "learning_rate": 9.617483627782788e-05,
+ "loss": 1.1249,
+ "step": 1535
+ },
+ {
+ "epoch": 1.5364321215341814,
+ "grad_norm": 0.5451697707176208,
+ "learning_rate": 9.607009053598689e-05,
+ "loss": 1.0246,
+ "step": 1536
+ },
+ {
+ "epoch": 1.5374324028633053,
+ "grad_norm": 0.4846939742565155,
+ "learning_rate": 9.596534911240566e-05,
+ "loss": 0.8665,
+ "step": 1537
+ },
+ {
+ "epoch": 1.5384326841924292,
+ "grad_norm": 0.4528220295906067,
+ "learning_rate": 9.58606121221761e-05,
+ "loss": 0.9338,
+ "step": 1538
+ },
+ {
+ "epoch": 1.539432965521553,
+ "grad_norm": 0.4627808630466461,
+ "learning_rate": 9.57558796803852e-05,
+ "loss": 0.8086,
+ "step": 1539
+ },
+ {
+ "epoch": 1.5404332468506767,
+ "grad_norm": 0.47025686502456665,
+ "learning_rate": 9.565115190211497e-05,
+ "loss": 0.8745,
+ "step": 1540
+ },
+ {
+ "epoch": 1.5414335281798006,
+ "grad_norm": 0.5646499395370483,
+ "learning_rate": 9.554642890244233e-05,
+ "loss": 1.0445,
+ "step": 1541
+ },
+ {
+ "epoch": 1.5424338095089243,
+ "grad_norm": 0.48776212334632874,
+ "learning_rate": 9.54417107964389e-05,
+ "loss": 0.9189,
+ "step": 1542
+ },
+ {
+ "epoch": 1.5434340908380482,
+ "grad_norm": 0.4854126274585724,
+ "learning_rate": 9.533699769917092e-05,
+ "loss": 0.9359,
+ "step": 1543
+ },
+ {
+ "epoch": 1.544434372167172,
+ "grad_norm": 0.4896346926689148,
+ "learning_rate": 9.523228972569917e-05,
+ "loss": 0.8201,
+ "step": 1544
+ },
+ {
+ "epoch": 1.545434653496296,
+ "grad_norm": 0.5236535668373108,
+ "learning_rate": 9.512758699107879e-05,
+ "loss": 0.9501,
+ "step": 1545
+ },
+ {
+ "epoch": 1.5464349348254196,
+ "grad_norm": 0.607430636882782,
+ "learning_rate": 9.502288961035912e-05,
+ "loss": 0.8468,
+ "step": 1546
+ },
+ {
+ "epoch": 1.5474352161545435,
+ "grad_norm": 0.46944427490234375,
+ "learning_rate": 9.491819769858366e-05,
+ "loss": 0.8697,
+ "step": 1547
+ },
+ {
+ "epoch": 1.5484354974836672,
+ "grad_norm": 0.44860196113586426,
+ "learning_rate": 9.48135113707899e-05,
+ "loss": 0.9398,
+ "step": 1548
+ },
+ {
+ "epoch": 1.549435778812791,
+ "grad_norm": 0.45095279812812805,
+ "learning_rate": 9.470883074200916e-05,
+ "loss": 0.7818,
+ "step": 1549
+ },
+ {
+ "epoch": 1.550436060141915,
+ "grad_norm": 0.519603967666626,
+ "learning_rate": 9.460415592726653e-05,
+ "loss": 0.8663,
+ "step": 1550
+ },
+ {
+ "epoch": 1.5514363414710388,
+ "grad_norm": 0.4833553731441498,
+ "learning_rate": 9.449948704158071e-05,
+ "loss": 0.958,
+ "step": 1551
+ },
+ {
+ "epoch": 1.5524366228001627,
+ "grad_norm": 0.504408597946167,
+ "learning_rate": 9.439482419996384e-05,
+ "loss": 0.8795,
+ "step": 1552
+ },
+ {
+ "epoch": 1.5534369041292864,
+ "grad_norm": 0.45152923464775085,
+ "learning_rate": 9.42901675174215e-05,
+ "loss": 0.8427,
+ "step": 1553
+ },
+ {
+ "epoch": 1.55443718545841,
+ "grad_norm": 0.48051750659942627,
+ "learning_rate": 9.418551710895243e-05,
+ "loss": 0.8997,
+ "step": 1554
+ },
+ {
+ "epoch": 1.555437466787534,
+ "grad_norm": 0.41671374440193176,
+ "learning_rate": 9.408087308954853e-05,
+ "loss": 0.7823,
+ "step": 1555
+ },
+ {
+ "epoch": 1.5564377481166578,
+ "grad_norm": 0.4859127402305603,
+ "learning_rate": 9.397623557419461e-05,
+ "loss": 0.8865,
+ "step": 1556
+ },
+ {
+ "epoch": 1.5574380294457817,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 9.38716046778684e-05,
+ "loss": 0.8464,
+ "step": 1557
+ },
+ {
+ "epoch": 1.5584383107749056,
+ "grad_norm": 0.4976697564125061,
+ "learning_rate": 9.37669805155403e-05,
+ "loss": 0.948,
+ "step": 1558
+ },
+ {
+ "epoch": 1.5594385921040292,
+ "grad_norm": 0.5431742668151855,
+ "learning_rate": 9.366236320217339e-05,
+ "loss": 1.1718,
+ "step": 1559
+ },
+ {
+ "epoch": 1.5604388734331531,
+ "grad_norm": 0.49732932448387146,
+ "learning_rate": 9.355775285272318e-05,
+ "loss": 0.939,
+ "step": 1560
+ },
+ {
+ "epoch": 1.5614391547622768,
+ "grad_norm": 0.4857761859893799,
+ "learning_rate": 9.34531495821375e-05,
+ "loss": 0.9269,
+ "step": 1561
+ },
+ {
+ "epoch": 1.5624394360914007,
+ "grad_norm": 0.47211897373199463,
+ "learning_rate": 9.334855350535645e-05,
+ "loss": 1.0069,
+ "step": 1562
+ },
+ {
+ "epoch": 1.5634397174205246,
+ "grad_norm": 0.4433748126029968,
+ "learning_rate": 9.324396473731217e-05,
+ "loss": 0.866,
+ "step": 1563
+ },
+ {
+ "epoch": 1.5644399987496485,
+ "grad_norm": 0.5030574798583984,
+ "learning_rate": 9.313938339292883e-05,
+ "loss": 0.7763,
+ "step": 1564
+ },
+ {
+ "epoch": 1.5654402800787721,
+ "grad_norm": 0.46466779708862305,
+ "learning_rate": 9.303480958712239e-05,
+ "loss": 0.9033,
+ "step": 1565
+ },
+ {
+ "epoch": 1.566440561407896,
+ "grad_norm": 0.39663952589035034,
+ "learning_rate": 9.293024343480055e-05,
+ "loss": 0.7205,
+ "step": 1566
+ },
+ {
+ "epoch": 1.5674408427370197,
+ "grad_norm": 0.5455542206764221,
+ "learning_rate": 9.282568505086261e-05,
+ "loss": 0.8864,
+ "step": 1567
+ },
+ {
+ "epoch": 1.5684411240661436,
+ "grad_norm": 0.5139548778533936,
+ "learning_rate": 9.272113455019935e-05,
+ "loss": 0.9822,
+ "step": 1568
+ },
+ {
+ "epoch": 1.5694414053952674,
+ "grad_norm": 0.46824902296066284,
+ "learning_rate": 9.261659204769284e-05,
+ "loss": 0.8348,
+ "step": 1569
+ },
+ {
+ "epoch": 1.5704416867243913,
+ "grad_norm": 0.5223984122276306,
+ "learning_rate": 9.251205765821636e-05,
+ "loss": 0.9696,
+ "step": 1570
+ },
+ {
+ "epoch": 1.5714419680535152,
+ "grad_norm": 0.6279047727584839,
+ "learning_rate": 9.240753149663433e-05,
+ "loss": 1.009,
+ "step": 1571
+ },
+ {
+ "epoch": 1.5724422493826389,
+ "grad_norm": 0.49068430066108704,
+ "learning_rate": 9.230301367780208e-05,
+ "loss": 0.9984,
+ "step": 1572
+ },
+ {
+ "epoch": 1.5734425307117625,
+ "grad_norm": 0.4828907251358032,
+ "learning_rate": 9.219850431656579e-05,
+ "loss": 0.8535,
+ "step": 1573
+ },
+ {
+ "epoch": 1.5744428120408864,
+ "grad_norm": 0.4925834834575653,
+ "learning_rate": 9.209400352776237e-05,
+ "loss": 0.8849,
+ "step": 1574
+ },
+ {
+ "epoch": 1.5754430933700103,
+ "grad_norm": 0.5048914551734924,
+ "learning_rate": 9.198951142621929e-05,
+ "loss": 0.8767,
+ "step": 1575
+ },
+ {
+ "epoch": 1.5764433746991342,
+ "grad_norm": 0.44887635111808777,
+ "learning_rate": 9.188502812675446e-05,
+ "loss": 0.8687,
+ "step": 1576
+ },
+ {
+ "epoch": 1.577443656028258,
+ "grad_norm": 0.4909934401512146,
+ "learning_rate": 9.178055374417612e-05,
+ "loss": 0.8362,
+ "step": 1577
+ },
+ {
+ "epoch": 1.5784439373573818,
+ "grad_norm": 0.45031628012657166,
+ "learning_rate": 9.167608839328272e-05,
+ "loss": 0.902,
+ "step": 1578
+ },
+ {
+ "epoch": 1.5794442186865054,
+ "grad_norm": 0.5682864189147949,
+ "learning_rate": 9.15716321888628e-05,
+ "loss": 1.0558,
+ "step": 1579
+ },
+ {
+ "epoch": 1.5804445000156293,
+ "grad_norm": 0.4406115412712097,
+ "learning_rate": 9.146718524569487e-05,
+ "loss": 0.8283,
+ "step": 1580
+ },
+ {
+ "epoch": 1.5814447813447532,
+ "grad_norm": 0.4749000072479248,
+ "learning_rate": 9.136274767854716e-05,
+ "loss": 0.9342,
+ "step": 1581
+ },
+ {
+ "epoch": 1.582445062673877,
+ "grad_norm": 0.4785940945148468,
+ "learning_rate": 9.125831960217774e-05,
+ "loss": 0.9208,
+ "step": 1582
+ },
+ {
+ "epoch": 1.583445344003001,
+ "grad_norm": 0.572299599647522,
+ "learning_rate": 9.115390113133414e-05,
+ "loss": 0.8469,
+ "step": 1583
+ },
+ {
+ "epoch": 1.5844456253321246,
+ "grad_norm": 0.4829537570476532,
+ "learning_rate": 9.104949238075336e-05,
+ "loss": 0.9471,
+ "step": 1584
+ },
+ {
+ "epoch": 1.5854459066612485,
+ "grad_norm": 0.5315890908241272,
+ "learning_rate": 9.094509346516178e-05,
+ "loss": 0.9663,
+ "step": 1585
+ },
+ {
+ "epoch": 1.5864461879903722,
+ "grad_norm": 0.4654553532600403,
+ "learning_rate": 9.084070449927488e-05,
+ "loss": 0.7776,
+ "step": 1586
+ },
+ {
+ "epoch": 1.587446469319496,
+ "grad_norm": 0.5083040595054626,
+ "learning_rate": 9.07363255977973e-05,
+ "loss": 0.8438,
+ "step": 1587
+ },
+ {
+ "epoch": 1.58844675064862,
+ "grad_norm": 0.502129077911377,
+ "learning_rate": 9.063195687542249e-05,
+ "loss": 0.8481,
+ "step": 1588
+ },
+ {
+ "epoch": 1.5894470319777438,
+ "grad_norm": 0.517439067363739,
+ "learning_rate": 9.052759844683295e-05,
+ "loss": 0.9054,
+ "step": 1589
+ },
+ {
+ "epoch": 1.5904473133068675,
+ "grad_norm": 0.4777907431125641,
+ "learning_rate": 9.042325042669961e-05,
+ "loss": 0.9888,
+ "step": 1590
+ },
+ {
+ "epoch": 1.5914475946359914,
+ "grad_norm": 0.41228219866752625,
+ "learning_rate": 9.03189129296821e-05,
+ "loss": 0.5767,
+ "step": 1591
+ },
+ {
+ "epoch": 1.592447875965115,
+ "grad_norm": 0.45188775658607483,
+ "learning_rate": 9.021458607042845e-05,
+ "loss": 0.875,
+ "step": 1592
+ },
+ {
+ "epoch": 1.593448157294239,
+ "grad_norm": 0.46999362111091614,
+ "learning_rate": 9.011026996357503e-05,
+ "loss": 0.8739,
+ "step": 1593
+ },
+ {
+ "epoch": 1.5944484386233628,
+ "grad_norm": 0.5621476173400879,
+ "learning_rate": 9.000596472374637e-05,
+ "loss": 0.8978,
+ "step": 1594
+ },
+ {
+ "epoch": 1.5954487199524867,
+ "grad_norm": 0.4524415135383606,
+ "learning_rate": 8.990167046555504e-05,
+ "loss": 0.7987,
+ "step": 1595
+ },
+ {
+ "epoch": 1.5964490012816106,
+ "grad_norm": 0.42351627349853516,
+ "learning_rate": 8.97973873036016e-05,
+ "loss": 0.8705,
+ "step": 1596
+ },
+ {
+ "epoch": 1.5974492826107343,
+ "grad_norm": 0.45115014910697937,
+ "learning_rate": 8.969311535247438e-05,
+ "loss": 0.9235,
+ "step": 1597
+ },
+ {
+ "epoch": 1.598449563939858,
+ "grad_norm": 0.5297085642814636,
+ "learning_rate": 8.958885472674939e-05,
+ "loss": 0.9363,
+ "step": 1598
+ },
+ {
+ "epoch": 1.5994498452689818,
+ "grad_norm": 0.5296758413314819,
+ "learning_rate": 8.948460554099018e-05,
+ "loss": 0.9461,
+ "step": 1599
+ },
+ {
+ "epoch": 1.6004501265981057,
+ "grad_norm": 0.4951537251472473,
+ "learning_rate": 8.93803679097478e-05,
+ "loss": 0.9494,
+ "step": 1600
+ },
+ {
+ "epoch": 1.6014504079272296,
+ "grad_norm": 0.5380229949951172,
+ "learning_rate": 8.927614194756052e-05,
+ "loss": 0.8813,
+ "step": 1601
+ },
+ {
+ "epoch": 1.6024506892563535,
+ "grad_norm": 0.487196683883667,
+ "learning_rate": 8.917192776895382e-05,
+ "loss": 0.8183,
+ "step": 1602
+ },
+ {
+ "epoch": 1.6034509705854771,
+ "grad_norm": 0.450591504573822,
+ "learning_rate": 8.906772548844026e-05,
+ "loss": 0.9506,
+ "step": 1603
+ },
+ {
+ "epoch": 1.604451251914601,
+ "grad_norm": 0.5414707064628601,
+ "learning_rate": 8.896353522051928e-05,
+ "loss": 1.2171,
+ "step": 1604
+ },
+ {
+ "epoch": 1.6054515332437247,
+ "grad_norm": 0.5198320746421814,
+ "learning_rate": 8.885935707967716e-05,
+ "loss": 0.8762,
+ "step": 1605
+ },
+ {
+ "epoch": 1.6064518145728486,
+ "grad_norm": 0.4546220302581787,
+ "learning_rate": 8.875519118038684e-05,
+ "loss": 0.9634,
+ "step": 1606
+ },
+ {
+ "epoch": 1.6074520959019725,
+ "grad_norm": 0.5151107907295227,
+ "learning_rate": 8.865103763710777e-05,
+ "loss": 1.1038,
+ "step": 1607
+ },
+ {
+ "epoch": 1.6084523772310964,
+ "grad_norm": 0.46089720726013184,
+ "learning_rate": 8.854689656428591e-05,
+ "loss": 0.8706,
+ "step": 1608
+ },
+ {
+ "epoch": 1.60945265856022,
+ "grad_norm": 0.4554317593574524,
+ "learning_rate": 8.844276807635343e-05,
+ "loss": 0.7553,
+ "step": 1609
+ },
+ {
+ "epoch": 1.610452939889344,
+ "grad_norm": 0.5166018009185791,
+ "learning_rate": 8.833865228772871e-05,
+ "loss": 0.8954,
+ "step": 1610
+ },
+ {
+ "epoch": 1.6114532212184676,
+ "grad_norm": 0.45595693588256836,
+ "learning_rate": 8.823454931281616e-05,
+ "loss": 0.9015,
+ "step": 1611
+ },
+ {
+ "epoch": 1.6124535025475915,
+ "grad_norm": 0.4563496708869934,
+ "learning_rate": 8.813045926600615e-05,
+ "loss": 0.9071,
+ "step": 1612
+ },
+ {
+ "epoch": 1.6134537838767153,
+ "grad_norm": 0.44123467803001404,
+ "learning_rate": 8.802638226167479e-05,
+ "loss": 0.8316,
+ "step": 1613
+ },
+ {
+ "epoch": 1.6144540652058392,
+ "grad_norm": 0.5304034352302551,
+ "learning_rate": 8.792231841418391e-05,
+ "loss": 0.9965,
+ "step": 1614
+ },
+ {
+ "epoch": 1.6154543465349631,
+ "grad_norm": 0.5578649044036865,
+ "learning_rate": 8.781826783788084e-05,
+ "loss": 0.9171,
+ "step": 1615
+ },
+ {
+ "epoch": 1.6164546278640868,
+ "grad_norm": 0.5331206917762756,
+ "learning_rate": 8.771423064709837e-05,
+ "loss": 0.8648,
+ "step": 1616
+ },
+ {
+ "epoch": 1.6174549091932104,
+ "grad_norm": 0.5196745991706848,
+ "learning_rate": 8.76102069561545e-05,
+ "loss": 0.9136,
+ "step": 1617
+ },
+ {
+ "epoch": 1.6184551905223343,
+ "grad_norm": 0.5278195142745972,
+ "learning_rate": 8.750619687935251e-05,
+ "loss": 0.9105,
+ "step": 1618
+ },
+ {
+ "epoch": 1.6194554718514582,
+ "grad_norm": 0.4967080056667328,
+ "learning_rate": 8.740220053098067e-05,
+ "loss": 0.8975,
+ "step": 1619
+ },
+ {
+ "epoch": 1.620455753180582,
+ "grad_norm": 0.5626882910728455,
+ "learning_rate": 8.729821802531212e-05,
+ "loss": 1.0178,
+ "step": 1620
+ },
+ {
+ "epoch": 1.621456034509706,
+ "grad_norm": 0.4372572898864746,
+ "learning_rate": 8.719424947660487e-05,
+ "loss": 0.8344,
+ "step": 1621
+ },
+ {
+ "epoch": 1.6224563158388297,
+ "grad_norm": 0.5572327971458435,
+ "learning_rate": 8.70902949991015e-05,
+ "loss": 0.9831,
+ "step": 1622
+ },
+ {
+ "epoch": 1.6234565971679535,
+ "grad_norm": 0.43764790892601013,
+ "learning_rate": 8.698635470702923e-05,
+ "loss": 0.8901,
+ "step": 1623
+ },
+ {
+ "epoch": 1.6244568784970772,
+ "grad_norm": 0.5335058569908142,
+ "learning_rate": 8.688242871459963e-05,
+ "loss": 0.8063,
+ "step": 1624
+ },
+ {
+ "epoch": 1.625457159826201,
+ "grad_norm": 0.5070383548736572,
+ "learning_rate": 8.677851713600855e-05,
+ "loss": 1.1381,
+ "step": 1625
+ },
+ {
+ "epoch": 1.626457441155325,
+ "grad_norm": 0.5117019414901733,
+ "learning_rate": 8.667462008543603e-05,
+ "loss": 1.1598,
+ "step": 1626
+ },
+ {
+ "epoch": 1.6274577224844489,
+ "grad_norm": 0.4911440908908844,
+ "learning_rate": 8.657073767704615e-05,
+ "loss": 0.9673,
+ "step": 1627
+ },
+ {
+ "epoch": 1.6284580038135725,
+ "grad_norm": 0.4799586832523346,
+ "learning_rate": 8.646687002498692e-05,
+ "loss": 0.8415,
+ "step": 1628
+ },
+ {
+ "epoch": 1.6294582851426964,
+ "grad_norm": 0.5615330934524536,
+ "learning_rate": 8.636301724339004e-05,
+ "loss": 0.9751,
+ "step": 1629
+ },
+ {
+ "epoch": 1.63045856647182,
+ "grad_norm": 0.45118963718414307,
+ "learning_rate": 8.625917944637096e-05,
+ "loss": 0.9169,
+ "step": 1630
+ },
+ {
+ "epoch": 1.631458847800944,
+ "grad_norm": 0.49533525109291077,
+ "learning_rate": 8.615535674802865e-05,
+ "loss": 0.9739,
+ "step": 1631
+ },
+ {
+ "epoch": 1.6324591291300679,
+ "grad_norm": 0.5451453328132629,
+ "learning_rate": 8.605154926244543e-05,
+ "loss": 0.777,
+ "step": 1632
+ },
+ {
+ "epoch": 1.6334594104591917,
+ "grad_norm": 0.6013240814208984,
+ "learning_rate": 8.594775710368704e-05,
+ "loss": 0.9289,
+ "step": 1633
+ },
+ {
+ "epoch": 1.6344596917883156,
+ "grad_norm": 0.5311821699142456,
+ "learning_rate": 8.584398038580226e-05,
+ "loss": 0.9737,
+ "step": 1634
+ },
+ {
+ "epoch": 1.6354599731174393,
+ "grad_norm": 0.4836428165435791,
+ "learning_rate": 8.574021922282292e-05,
+ "loss": 0.9495,
+ "step": 1635
+ },
+ {
+ "epoch": 1.636460254446563,
+ "grad_norm": 0.5316966772079468,
+ "learning_rate": 8.563647372876378e-05,
+ "loss": 0.8871,
+ "step": 1636
+ },
+ {
+ "epoch": 1.6374605357756868,
+ "grad_norm": 0.4969998896121979,
+ "learning_rate": 8.553274401762237e-05,
+ "loss": 0.8881,
+ "step": 1637
+ },
+ {
+ "epoch": 1.6384608171048107,
+ "grad_norm": 0.48786112666130066,
+ "learning_rate": 8.542903020337887e-05,
+ "loss": 0.8859,
+ "step": 1638
+ },
+ {
+ "epoch": 1.6394610984339346,
+ "grad_norm": 0.4753643572330475,
+ "learning_rate": 8.532533239999602e-05,
+ "loss": 0.759,
+ "step": 1639
+ },
+ {
+ "epoch": 1.6404613797630585,
+ "grad_norm": 0.4672154486179352,
+ "learning_rate": 8.522165072141897e-05,
+ "loss": 0.8429,
+ "step": 1640
+ },
+ {
+ "epoch": 1.6414616610921822,
+ "grad_norm": 0.47218796610832214,
+ "learning_rate": 8.511798528157512e-05,
+ "loss": 0.7702,
+ "step": 1641
+ },
+ {
+ "epoch": 1.6424619424213058,
+ "grad_norm": 0.4409984052181244,
+ "learning_rate": 8.501433619437403e-05,
+ "loss": 0.7803,
+ "step": 1642
+ },
+ {
+ "epoch": 1.6434622237504297,
+ "grad_norm": 0.539503812789917,
+ "learning_rate": 8.49107035737073e-05,
+ "loss": 0.9739,
+ "step": 1643
+ },
+ {
+ "epoch": 1.6444625050795536,
+ "grad_norm": 0.5032373666763306,
+ "learning_rate": 8.480708753344846e-05,
+ "loss": 1.0876,
+ "step": 1644
+ },
+ {
+ "epoch": 1.6454627864086775,
+ "grad_norm": 0.4480466842651367,
+ "learning_rate": 8.470348818745278e-05,
+ "loss": 0.9183,
+ "step": 1645
+ },
+ {
+ "epoch": 1.6464630677378014,
+ "grad_norm": 0.49911466240882874,
+ "learning_rate": 8.459990564955721e-05,
+ "loss": 0.8048,
+ "step": 1646
+ },
+ {
+ "epoch": 1.647463349066925,
+ "grad_norm": 0.48236754536628723,
+ "learning_rate": 8.449634003358022e-05,
+ "loss": 0.9785,
+ "step": 1647
+ },
+ {
+ "epoch": 1.648463630396049,
+ "grad_norm": 0.5161852240562439,
+ "learning_rate": 8.43927914533217e-05,
+ "loss": 0.9626,
+ "step": 1648
+ },
+ {
+ "epoch": 1.6494639117251726,
+ "grad_norm": 0.5653015971183777,
+ "learning_rate": 8.428926002256283e-05,
+ "loss": 1.0785,
+ "step": 1649
+ },
+ {
+ "epoch": 1.6504641930542965,
+ "grad_norm": 0.5340739488601685,
+ "learning_rate": 8.418574585506591e-05,
+ "loss": 1.0613,
+ "step": 1650
+ },
+ {
+ "epoch": 1.6514644743834204,
+ "grad_norm": 0.4651111960411072,
+ "learning_rate": 8.408224906457429e-05,
+ "loss": 0.8313,
+ "step": 1651
+ },
+ {
+ "epoch": 1.6524647557125443,
+ "grad_norm": 0.5264735221862793,
+ "learning_rate": 8.397876976481224e-05,
+ "loss": 0.8187,
+ "step": 1652
+ },
+ {
+ "epoch": 1.653465037041668,
+ "grad_norm": 0.4576081335544586,
+ "learning_rate": 8.387530806948476e-05,
+ "loss": 0.8758,
+ "step": 1653
+ },
+ {
+ "epoch": 1.6544653183707918,
+ "grad_norm": 0.4851805567741394,
+ "learning_rate": 8.37718640922776e-05,
+ "loss": 0.877,
+ "step": 1654
+ },
+ {
+ "epoch": 1.6554655996999155,
+ "grad_norm": 0.48545941710472107,
+ "learning_rate": 8.366843794685695e-05,
+ "loss": 0.8988,
+ "step": 1655
+ },
+ {
+ "epoch": 1.6564658810290394,
+ "grad_norm": 0.5381633639335632,
+ "learning_rate": 8.356502974686941e-05,
+ "loss": 0.8958,
+ "step": 1656
+ },
+ {
+ "epoch": 1.6574661623581632,
+ "grad_norm": 0.5239037275314331,
+ "learning_rate": 8.346163960594193e-05,
+ "loss": 0.9698,
+ "step": 1657
+ },
+ {
+ "epoch": 1.6584664436872871,
+ "grad_norm": 0.5378285050392151,
+ "learning_rate": 8.335826763768156e-05,
+ "loss": 0.8765,
+ "step": 1658
+ },
+ {
+ "epoch": 1.659466725016411,
+ "grad_norm": 0.45296210050582886,
+ "learning_rate": 8.325491395567541e-05,
+ "loss": 0.8048,
+ "step": 1659
+ },
+ {
+ "epoch": 1.6604670063455347,
+ "grad_norm": 0.4575178325176239,
+ "learning_rate": 8.315157867349046e-05,
+ "loss": 0.8388,
+ "step": 1660
+ },
+ {
+ "epoch": 1.6614672876746583,
+ "grad_norm": 0.4762253165245056,
+ "learning_rate": 8.30482619046735e-05,
+ "loss": 0.9123,
+ "step": 1661
+ },
+ {
+ "epoch": 1.6624675690037822,
+ "grad_norm": 0.46717318892478943,
+ "learning_rate": 8.294496376275104e-05,
+ "loss": 0.9213,
+ "step": 1662
+ },
+ {
+ "epoch": 1.6634678503329061,
+ "grad_norm": 0.4792725741863251,
+ "learning_rate": 8.284168436122898e-05,
+ "loss": 0.793,
+ "step": 1663
+ },
+ {
+ "epoch": 1.66446813166203,
+ "grad_norm": 0.4854644238948822,
+ "learning_rate": 8.273842381359273e-05,
+ "loss": 0.9657,
+ "step": 1664
+ },
+ {
+ "epoch": 1.665468412991154,
+ "grad_norm": 0.44722744822502136,
+ "learning_rate": 8.263518223330697e-05,
+ "loss": 0.8159,
+ "step": 1665
+ },
+ {
+ "epoch": 1.6664686943202776,
+ "grad_norm": 0.5070934891700745,
+ "learning_rate": 8.253195973381552e-05,
+ "loss": 0.8971,
+ "step": 1666
+ },
+ {
+ "epoch": 1.6674689756494014,
+ "grad_norm": 0.4743734300136566,
+ "learning_rate": 8.242875642854121e-05,
+ "loss": 0.8042,
+ "step": 1667
+ },
+ {
+ "epoch": 1.668469256978525,
+ "grad_norm": 0.5857224464416504,
+ "learning_rate": 8.232557243088585e-05,
+ "loss": 1.0666,
+ "step": 1668
+ },
+ {
+ "epoch": 1.669469538307649,
+ "grad_norm": 0.5257895588874817,
+ "learning_rate": 8.222240785422996e-05,
+ "loss": 0.9619,
+ "step": 1669
+ },
+ {
+ "epoch": 1.6704698196367729,
+ "grad_norm": 0.5153073668479919,
+ "learning_rate": 8.211926281193277e-05,
+ "loss": 0.9189,
+ "step": 1670
+ },
+ {
+ "epoch": 1.6714701009658968,
+ "grad_norm": 0.49723324179649353,
+ "learning_rate": 8.201613741733203e-05,
+ "loss": 1.037,
+ "step": 1671
+ },
+ {
+ "epoch": 1.6724703822950204,
+ "grad_norm": 0.5014336705207825,
+ "learning_rate": 8.191303178374389e-05,
+ "loss": 0.8598,
+ "step": 1672
+ },
+ {
+ "epoch": 1.6734706636241443,
+ "grad_norm": 0.5031597018241882,
+ "learning_rate": 8.180994602446279e-05,
+ "loss": 0.9622,
+ "step": 1673
+ },
+ {
+ "epoch": 1.674470944953268,
+ "grad_norm": 0.4872223436832428,
+ "learning_rate": 8.170688025276134e-05,
+ "loss": 0.7971,
+ "step": 1674
+ },
+ {
+ "epoch": 1.6754712262823919,
+ "grad_norm": 0.5090667605400085,
+ "learning_rate": 8.160383458189022e-05,
+ "loss": 0.9825,
+ "step": 1675
+ },
+ {
+ "epoch": 1.6764715076115158,
+ "grad_norm": 0.49642691016197205,
+ "learning_rate": 8.15008091250779e-05,
+ "loss": 0.9541,
+ "step": 1676
+ },
+ {
+ "epoch": 1.6774717889406396,
+ "grad_norm": 0.7710174322128296,
+ "learning_rate": 8.13978039955308e-05,
+ "loss": 0.9036,
+ "step": 1677
+ },
+ {
+ "epoch": 1.6784720702697635,
+ "grad_norm": 0.551180362701416,
+ "learning_rate": 8.12948193064329e-05,
+ "loss": 0.931,
+ "step": 1678
+ },
+ {
+ "epoch": 1.6794723515988872,
+ "grad_norm": 0.540558934211731,
+ "learning_rate": 8.119185517094578e-05,
+ "loss": 0.8364,
+ "step": 1679
+ },
+ {
+ "epoch": 1.6804726329280109,
+ "grad_norm": 0.47380101680755615,
+ "learning_rate": 8.108891170220836e-05,
+ "loss": 0.8494,
+ "step": 1680
+ },
+ {
+ "epoch": 1.6814729142571347,
+ "grad_norm": 0.4427139461040497,
+ "learning_rate": 8.098598901333692e-05,
+ "loss": 0.8441,
+ "step": 1681
+ },
+ {
+ "epoch": 1.6824731955862586,
+ "grad_norm": 0.5092798471450806,
+ "learning_rate": 8.088308721742491e-05,
+ "loss": 0.9069,
+ "step": 1682
+ },
+ {
+ "epoch": 1.6834734769153825,
+ "grad_norm": 0.4453091621398926,
+ "learning_rate": 8.078020642754274e-05,
+ "loss": 0.8539,
+ "step": 1683
+ },
+ {
+ "epoch": 1.6844737582445064,
+ "grad_norm": 0.5102719068527222,
+ "learning_rate": 8.06773467567378e-05,
+ "loss": 0.808,
+ "step": 1684
+ },
+ {
+ "epoch": 1.68547403957363,
+ "grad_norm": 0.44998160004615784,
+ "learning_rate": 8.057450831803428e-05,
+ "loss": 0.9399,
+ "step": 1685
+ },
+ {
+ "epoch": 1.686474320902754,
+ "grad_norm": 0.47718214988708496,
+ "learning_rate": 8.047169122443302e-05,
+ "loss": 0.8851,
+ "step": 1686
+ },
+ {
+ "epoch": 1.6874746022318776,
+ "grad_norm": 0.5858275890350342,
+ "learning_rate": 8.036889558891142e-05,
+ "loss": 1.0813,
+ "step": 1687
+ },
+ {
+ "epoch": 1.6884748835610015,
+ "grad_norm": 0.6066718101501465,
+ "learning_rate": 8.026612152442329e-05,
+ "loss": 0.985,
+ "step": 1688
+ },
+ {
+ "epoch": 1.6894751648901254,
+ "grad_norm": 0.529468834400177,
+ "learning_rate": 8.016336914389874e-05,
+ "loss": 1.0599,
+ "step": 1689
+ },
+ {
+ "epoch": 1.6904754462192493,
+ "grad_norm": 0.5604698061943054,
+ "learning_rate": 8.006063856024405e-05,
+ "loss": 0.8511,
+ "step": 1690
+ },
+ {
+ "epoch": 1.691475727548373,
+ "grad_norm": 0.5078622102737427,
+ "learning_rate": 7.995792988634152e-05,
+ "loss": 0.8286,
+ "step": 1691
+ },
+ {
+ "epoch": 1.6924760088774968,
+ "grad_norm": 0.5138706564903259,
+ "learning_rate": 7.985524323504948e-05,
+ "loss": 0.9054,
+ "step": 1692
+ },
+ {
+ "epoch": 1.6934762902066205,
+ "grad_norm": 0.42073604464530945,
+ "learning_rate": 7.975257871920195e-05,
+ "loss": 0.8403,
+ "step": 1693
+ },
+ {
+ "epoch": 1.6944765715357444,
+ "grad_norm": 0.5249999761581421,
+ "learning_rate": 7.964993645160866e-05,
+ "loss": 0.8382,
+ "step": 1694
+ },
+ {
+ "epoch": 1.6954768528648683,
+ "grad_norm": 0.4233437478542328,
+ "learning_rate": 7.954731654505491e-05,
+ "loss": 0.7757,
+ "step": 1695
+ },
+ {
+ "epoch": 1.6964771341939922,
+ "grad_norm": 0.5192474722862244,
+ "learning_rate": 7.944471911230142e-05,
+ "loss": 0.9689,
+ "step": 1696
+ },
+ {
+ "epoch": 1.697477415523116,
+ "grad_norm": 0.5599137544631958,
+ "learning_rate": 7.93421442660842e-05,
+ "loss": 1.1277,
+ "step": 1697
+ },
+ {
+ "epoch": 1.6984776968522397,
+ "grad_norm": 0.4425784647464752,
+ "learning_rate": 7.923959211911449e-05,
+ "loss": 0.8822,
+ "step": 1698
+ },
+ {
+ "epoch": 1.6994779781813634,
+ "grad_norm": 0.48276057839393616,
+ "learning_rate": 7.91370627840785e-05,
+ "loss": 1.0073,
+ "step": 1699
+ },
+ {
+ "epoch": 1.7004782595104873,
+ "grad_norm": 0.5134496688842773,
+ "learning_rate": 7.903455637363746e-05,
+ "loss": 0.8437,
+ "step": 1700
+ },
+ {
+ "epoch": 1.7014785408396111,
+ "grad_norm": 0.49254342913627625,
+ "learning_rate": 7.89320730004274e-05,
+ "loss": 0.9512,
+ "step": 1701
+ },
+ {
+ "epoch": 1.702478822168735,
+ "grad_norm": 0.4442595839500427,
+ "learning_rate": 7.882961277705895e-05,
+ "loss": 0.8391,
+ "step": 1702
+ },
+ {
+ "epoch": 1.703479103497859,
+ "grad_norm": 0.5177878141403198,
+ "learning_rate": 7.872717581611741e-05,
+ "loss": 0.9012,
+ "step": 1703
+ },
+ {
+ "epoch": 1.7044793848269826,
+ "grad_norm": 0.4612918496131897,
+ "learning_rate": 7.862476223016246e-05,
+ "loss": 0.86,
+ "step": 1704
+ },
+ {
+ "epoch": 1.7054796661561062,
+ "grad_norm": 0.47172513604164124,
+ "learning_rate": 7.852237213172812e-05,
+ "loss": 0.8821,
+ "step": 1705
+ },
+ {
+ "epoch": 1.7064799474852301,
+ "grad_norm": 0.5113676190376282,
+ "learning_rate": 7.842000563332254e-05,
+ "loss": 0.8243,
+ "step": 1706
+ },
+ {
+ "epoch": 1.707480228814354,
+ "grad_norm": 0.5000366568565369,
+ "learning_rate": 7.831766284742807e-05,
+ "loss": 0.9887,
+ "step": 1707
+ },
+ {
+ "epoch": 1.708480510143478,
+ "grad_norm": 0.5838572978973389,
+ "learning_rate": 7.82153438865009e-05,
+ "loss": 0.9401,
+ "step": 1708
+ },
+ {
+ "epoch": 1.7094807914726018,
+ "grad_norm": 0.5229962468147278,
+ "learning_rate": 7.811304886297104e-05,
+ "loss": 1.0353,
+ "step": 1709
+ },
+ {
+ "epoch": 1.7104810728017255,
+ "grad_norm": 0.45854273438453674,
+ "learning_rate": 7.801077788924224e-05,
+ "loss": 0.8868,
+ "step": 1710
+ },
+ {
+ "epoch": 1.7114813541308493,
+ "grad_norm": 0.5133983492851257,
+ "learning_rate": 7.790853107769179e-05,
+ "loss": 0.9689,
+ "step": 1711
+ },
+ {
+ "epoch": 1.712481635459973,
+ "grad_norm": 0.5269356369972229,
+ "learning_rate": 7.780630854067045e-05,
+ "loss": 0.8751,
+ "step": 1712
+ },
+ {
+ "epoch": 1.713481916789097,
+ "grad_norm": 0.523595929145813,
+ "learning_rate": 7.77041103905023e-05,
+ "loss": 0.9806,
+ "step": 1713
+ },
+ {
+ "epoch": 1.7144821981182208,
+ "grad_norm": 0.6217412352561951,
+ "learning_rate": 7.760193673948461e-05,
+ "loss": 0.8298,
+ "step": 1714
+ },
+ {
+ "epoch": 1.7154824794473447,
+ "grad_norm": 0.47979483008384705,
+ "learning_rate": 7.749978769988778e-05,
+ "loss": 0.8578,
+ "step": 1715
+ },
+ {
+ "epoch": 1.7164827607764683,
+ "grad_norm": 0.4971829652786255,
+ "learning_rate": 7.739766338395511e-05,
+ "loss": 0.9794,
+ "step": 1716
+ },
+ {
+ "epoch": 1.7174830421055922,
+ "grad_norm": 0.5164886116981506,
+ "learning_rate": 7.729556390390275e-05,
+ "loss": 0.9267,
+ "step": 1717
+ },
+ {
+ "epoch": 1.7184833234347159,
+ "grad_norm": 0.5067420601844788,
+ "learning_rate": 7.719348937191957e-05,
+ "loss": 0.951,
+ "step": 1718
+ },
+ {
+ "epoch": 1.7194836047638398,
+ "grad_norm": 0.5390254259109497,
+ "learning_rate": 7.709143990016702e-05,
+ "loss": 0.8409,
+ "step": 1719
+ },
+ {
+ "epoch": 1.7204838860929637,
+ "grad_norm": 0.4631121754646301,
+ "learning_rate": 7.698941560077899e-05,
+ "loss": 0.704,
+ "step": 1720
+ },
+ {
+ "epoch": 1.7214841674220875,
+ "grad_norm": 0.5231932997703552,
+ "learning_rate": 7.688741658586178e-05,
+ "loss": 1.0912,
+ "step": 1721
+ },
+ {
+ "epoch": 1.7224844487512114,
+ "grad_norm": 0.4563293755054474,
+ "learning_rate": 7.678544296749384e-05,
+ "loss": 0.8444,
+ "step": 1722
+ },
+ {
+ "epoch": 1.723484730080335,
+ "grad_norm": 0.4844750463962555,
+ "learning_rate": 7.668349485772572e-05,
+ "loss": 0.9234,
+ "step": 1723
+ },
+ {
+ "epoch": 1.7244850114094588,
+ "grad_norm": 0.45698872208595276,
+ "learning_rate": 7.658157236857999e-05,
+ "loss": 0.8608,
+ "step": 1724
+ },
+ {
+ "epoch": 1.7254852927385826,
+ "grad_norm": 0.46694663166999817,
+ "learning_rate": 7.6479675612051e-05,
+ "loss": 0.9628,
+ "step": 1725
+ },
+ {
+ "epoch": 1.7264855740677065,
+ "grad_norm": 0.46077099442481995,
+ "learning_rate": 7.637780470010487e-05,
+ "loss": 0.8173,
+ "step": 1726
+ },
+ {
+ "epoch": 1.7274858553968304,
+ "grad_norm": 0.5198522210121155,
+ "learning_rate": 7.62759597446793e-05,
+ "loss": 0.8813,
+ "step": 1727
+ },
+ {
+ "epoch": 1.7284861367259543,
+ "grad_norm": 0.48385483026504517,
+ "learning_rate": 7.617414085768351e-05,
+ "loss": 0.7007,
+ "step": 1728
+ },
+ {
+ "epoch": 1.729486418055078,
+ "grad_norm": 0.5622795224189758,
+ "learning_rate": 7.607234815099802e-05,
+ "loss": 1.0422,
+ "step": 1729
+ },
+ {
+ "epoch": 1.7304866993842019,
+ "grad_norm": 0.5077874660491943,
+ "learning_rate": 7.597058173647458e-05,
+ "loss": 1.014,
+ "step": 1730
+ },
+ {
+ "epoch": 1.7314869807133255,
+ "grad_norm": 0.598760724067688,
+ "learning_rate": 7.586884172593609e-05,
+ "loss": 0.8979,
+ "step": 1731
+ },
+ {
+ "epoch": 1.7324872620424494,
+ "grad_norm": 0.6116266846656799,
+ "learning_rate": 7.576712823117645e-05,
+ "loss": 0.9121,
+ "step": 1732
+ },
+ {
+ "epoch": 1.7334875433715733,
+ "grad_norm": 0.6157407164573669,
+ "learning_rate": 7.566544136396037e-05,
+ "loss": 0.9361,
+ "step": 1733
+ },
+ {
+ "epoch": 1.7344878247006972,
+ "grad_norm": 0.5174565315246582,
+ "learning_rate": 7.556378123602334e-05,
+ "loss": 1.1858,
+ "step": 1734
+ },
+ {
+ "epoch": 1.7354881060298208,
+ "grad_norm": 0.42541515827178955,
+ "learning_rate": 7.54621479590714e-05,
+ "loss": 0.7425,
+ "step": 1735
+ },
+ {
+ "epoch": 1.7364883873589447,
+ "grad_norm": 0.49402132630348206,
+ "learning_rate": 7.536054164478123e-05,
+ "loss": 0.8158,
+ "step": 1736
+ },
+ {
+ "epoch": 1.7374886686880684,
+ "grad_norm": 0.4637628197669983,
+ "learning_rate": 7.525896240479976e-05,
+ "loss": 0.7859,
+ "step": 1737
+ },
+ {
+ "epoch": 1.7384889500171923,
+ "grad_norm": 0.5475689172744751,
+ "learning_rate": 7.51574103507442e-05,
+ "loss": 0.825,
+ "step": 1738
+ },
+ {
+ "epoch": 1.7394892313463162,
+ "grad_norm": 0.5652226209640503,
+ "learning_rate": 7.505588559420189e-05,
+ "loss": 0.9051,
+ "step": 1739
+ },
+ {
+ "epoch": 1.74048951267544,
+ "grad_norm": 0.4930717647075653,
+ "learning_rate": 7.495438824673016e-05,
+ "loss": 0.7797,
+ "step": 1740
+ },
+ {
+ "epoch": 1.741489794004564,
+ "grad_norm": 0.4611824154853821,
+ "learning_rate": 7.485291841985626e-05,
+ "loss": 1.014,
+ "step": 1741
+ },
+ {
+ "epoch": 1.7424900753336876,
+ "grad_norm": 0.4652807414531708,
+ "learning_rate": 7.475147622507717e-05,
+ "loss": 0.7601,
+ "step": 1742
+ },
+ {
+ "epoch": 1.7434903566628113,
+ "grad_norm": 0.5227355360984802,
+ "learning_rate": 7.465006177385953e-05,
+ "loss": 0.8616,
+ "step": 1743
+ },
+ {
+ "epoch": 1.7444906379919352,
+ "grad_norm": 0.42283377051353455,
+ "learning_rate": 7.454867517763948e-05,
+ "loss": 0.8647,
+ "step": 1744
+ },
+ {
+ "epoch": 1.745490919321059,
+ "grad_norm": 0.45151621103286743,
+ "learning_rate": 7.444731654782253e-05,
+ "loss": 0.8619,
+ "step": 1745
+ },
+ {
+ "epoch": 1.746491200650183,
+ "grad_norm": 0.6146779656410217,
+ "learning_rate": 7.434598599578351e-05,
+ "loss": 0.9479,
+ "step": 1746
+ },
+ {
+ "epoch": 1.7474914819793068,
+ "grad_norm": 0.4988139271736145,
+ "learning_rate": 7.424468363286634e-05,
+ "loss": 0.9136,
+ "step": 1747
+ },
+ {
+ "epoch": 1.7484917633084305,
+ "grad_norm": 0.5271700024604797,
+ "learning_rate": 7.414340957038406e-05,
+ "loss": 1.0416,
+ "step": 1748
+ },
+ {
+ "epoch": 1.7494920446375544,
+ "grad_norm": 0.46806615591049194,
+ "learning_rate": 7.404216391961847e-05,
+ "loss": 0.8376,
+ "step": 1749
+ },
+ {
+ "epoch": 1.750492325966678,
+ "grad_norm": 0.4781439006328583,
+ "learning_rate": 7.394094679182024e-05,
+ "loss": 0.9669,
+ "step": 1750
+ },
+ {
+ "epoch": 1.751492607295802,
+ "grad_norm": 0.49085667729377747,
+ "learning_rate": 7.383975829820874e-05,
+ "loss": 0.9279,
+ "step": 1751
+ },
+ {
+ "epoch": 1.7524928886249258,
+ "grad_norm": 0.4937964379787445,
+ "learning_rate": 7.37385985499718e-05,
+ "loss": 1.1126,
+ "step": 1752
+ },
+ {
+ "epoch": 1.7534931699540497,
+ "grad_norm": 0.3883766233921051,
+ "learning_rate": 7.36374676582657e-05,
+ "loss": 0.7398,
+ "step": 1753
+ },
+ {
+ "epoch": 1.7544934512831734,
+ "grad_norm": 0.4864053726196289,
+ "learning_rate": 7.353636573421496e-05,
+ "loss": 0.8172,
+ "step": 1754
+ },
+ {
+ "epoch": 1.7554937326122972,
+ "grad_norm": 0.48342639207839966,
+ "learning_rate": 7.343529288891239e-05,
+ "loss": 0.8957,
+ "step": 1755
+ },
+ {
+ "epoch": 1.756494013941421,
+ "grad_norm": 0.47928398847579956,
+ "learning_rate": 7.333424923341868e-05,
+ "loss": 0.8414,
+ "step": 1756
+ },
+ {
+ "epoch": 1.7574942952705448,
+ "grad_norm": 0.46736687421798706,
+ "learning_rate": 7.323323487876257e-05,
+ "loss": 0.7661,
+ "step": 1757
+ },
+ {
+ "epoch": 1.7584945765996687,
+ "grad_norm": 0.5184097290039062,
+ "learning_rate": 7.313224993594057e-05,
+ "loss": 0.8719,
+ "step": 1758
+ },
+ {
+ "epoch": 1.7594948579287926,
+ "grad_norm": 0.526541531085968,
+ "learning_rate": 7.303129451591686e-05,
+ "loss": 0.8801,
+ "step": 1759
+ },
+ {
+ "epoch": 1.7604951392579165,
+ "grad_norm": 0.5191768407821655,
+ "learning_rate": 7.29303687296232e-05,
+ "loss": 0.9343,
+ "step": 1760
+ },
+ {
+ "epoch": 1.7614954205870401,
+ "grad_norm": 0.5041552186012268,
+ "learning_rate": 7.282947268795877e-05,
+ "loss": 0.9369,
+ "step": 1761
+ },
+ {
+ "epoch": 1.7624957019161638,
+ "grad_norm": 0.4530990719795227,
+ "learning_rate": 7.272860650179006e-05,
+ "loss": 0.9629,
+ "step": 1762
+ },
+ {
+ "epoch": 1.7634959832452877,
+ "grad_norm": 0.42898643016815186,
+ "learning_rate": 7.262777028195081e-05,
+ "loss": 0.7658,
+ "step": 1763
+ },
+ {
+ "epoch": 1.7644962645744116,
+ "grad_norm": 0.4350574314594269,
+ "learning_rate": 7.252696413924174e-05,
+ "loss": 0.7273,
+ "step": 1764
+ },
+ {
+ "epoch": 1.7654965459035354,
+ "grad_norm": 0.517660915851593,
+ "learning_rate": 7.242618818443056e-05,
+ "loss": 0.9021,
+ "step": 1765
+ },
+ {
+ "epoch": 1.7664968272326593,
+ "grad_norm": 0.5530719757080078,
+ "learning_rate": 7.232544252825189e-05,
+ "loss": 0.8532,
+ "step": 1766
+ },
+ {
+ "epoch": 1.767497108561783,
+ "grad_norm": 0.41731134057044983,
+ "learning_rate": 7.222472728140695e-05,
+ "loss": 0.6834,
+ "step": 1767
+ },
+ {
+ "epoch": 1.7684973898909067,
+ "grad_norm": 0.4782492518424988,
+ "learning_rate": 7.212404255456357e-05,
+ "loss": 0.8692,
+ "step": 1768
+ },
+ {
+ "epoch": 1.7694976712200305,
+ "grad_norm": 0.5327005386352539,
+ "learning_rate": 7.202338845835606e-05,
+ "loss": 0.92,
+ "step": 1769
+ },
+ {
+ "epoch": 1.7704979525491544,
+ "grad_norm": 0.48882028460502625,
+ "learning_rate": 7.192276510338507e-05,
+ "loss": 0.8545,
+ "step": 1770
+ },
+ {
+ "epoch": 1.7714982338782783,
+ "grad_norm": 0.5156509280204773,
+ "learning_rate": 7.182217260021749e-05,
+ "loss": 0.9533,
+ "step": 1771
+ },
+ {
+ "epoch": 1.7724985152074022,
+ "grad_norm": 0.49955782294273376,
+ "learning_rate": 7.172161105938624e-05,
+ "loss": 0.7701,
+ "step": 1772
+ },
+ {
+ "epoch": 1.7734987965365259,
+ "grad_norm": 0.4707096219062805,
+ "learning_rate": 7.162108059139032e-05,
+ "loss": 0.9093,
+ "step": 1773
+ },
+ {
+ "epoch": 1.7744990778656498,
+ "grad_norm": 0.5026343464851379,
+ "learning_rate": 7.15205813066945e-05,
+ "loss": 1.0551,
+ "step": 1774
+ },
+ {
+ "epoch": 1.7754993591947734,
+ "grad_norm": 0.4696865975856781,
+ "learning_rate": 7.142011331572936e-05,
+ "loss": 0.8701,
+ "step": 1775
+ },
+ {
+ "epoch": 1.7764996405238973,
+ "grad_norm": 0.4939334988594055,
+ "learning_rate": 7.131967672889101e-05,
+ "loss": 0.9638,
+ "step": 1776
+ },
+ {
+ "epoch": 1.7774999218530212,
+ "grad_norm": 0.4661426842212677,
+ "learning_rate": 7.121927165654109e-05,
+ "loss": 0.8687,
+ "step": 1777
+ },
+ {
+ "epoch": 1.778500203182145,
+ "grad_norm": 0.48258379101753235,
+ "learning_rate": 7.111889820900664e-05,
+ "loss": 0.9335,
+ "step": 1778
+ },
+ {
+ "epoch": 1.7795004845112687,
+ "grad_norm": 0.4910578727722168,
+ "learning_rate": 7.101855649657991e-05,
+ "loss": 0.9632,
+ "step": 1779
+ },
+ {
+ "epoch": 1.7805007658403926,
+ "grad_norm": 0.46052396297454834,
+ "learning_rate": 7.091824662951827e-05,
+ "loss": 0.7958,
+ "step": 1780
+ },
+ {
+ "epoch": 1.7815010471695163,
+ "grad_norm": 0.4766314625740051,
+ "learning_rate": 7.08179687180442e-05,
+ "loss": 0.7427,
+ "step": 1781
+ },
+ {
+ "epoch": 1.7825013284986402,
+ "grad_norm": 0.4556989371776581,
+ "learning_rate": 7.071772287234497e-05,
+ "loss": 0.7899,
+ "step": 1782
+ },
+ {
+ "epoch": 1.783501609827764,
+ "grad_norm": 0.5186169743537903,
+ "learning_rate": 7.06175092025726e-05,
+ "loss": 0.9758,
+ "step": 1783
+ },
+ {
+ "epoch": 1.784501891156888,
+ "grad_norm": 0.5379285216331482,
+ "learning_rate": 7.051732781884378e-05,
+ "loss": 0.8966,
+ "step": 1784
+ },
+ {
+ "epoch": 1.7855021724860118,
+ "grad_norm": 0.520286500453949,
+ "learning_rate": 7.041717883123977e-05,
+ "loss": 0.9421,
+ "step": 1785
+ },
+ {
+ "epoch": 1.7865024538151355,
+ "grad_norm": 0.5489597916603088,
+ "learning_rate": 7.031706234980617e-05,
+ "loss": 0.936,
+ "step": 1786
+ },
+ {
+ "epoch": 1.7875027351442592,
+ "grad_norm": 0.5182730555534363,
+ "learning_rate": 7.021697848455291e-05,
+ "loss": 0.953,
+ "step": 1787
+ },
+ {
+ "epoch": 1.788503016473383,
+ "grad_norm": 0.5181865692138672,
+ "learning_rate": 7.011692734545403e-05,
+ "loss": 0.8688,
+ "step": 1788
+ },
+ {
+ "epoch": 1.789503297802507,
+ "grad_norm": 0.44486725330352783,
+ "learning_rate": 7.001690904244767e-05,
+ "loss": 0.8014,
+ "step": 1789
+ },
+ {
+ "epoch": 1.7905035791316308,
+ "grad_norm": 0.5337903499603271,
+ "learning_rate": 6.991692368543584e-05,
+ "loss": 0.9003,
+ "step": 1790
+ },
+ {
+ "epoch": 1.7915038604607547,
+ "grad_norm": 0.5147045254707336,
+ "learning_rate": 6.981697138428434e-05,
+ "loss": 0.9162,
+ "step": 1791
+ },
+ {
+ "epoch": 1.7925041417898784,
+ "grad_norm": 0.5204777121543884,
+ "learning_rate": 6.971705224882271e-05,
+ "loss": 0.8938,
+ "step": 1792
+ },
+ {
+ "epoch": 1.7935044231190023,
+ "grad_norm": 0.45608311891555786,
+ "learning_rate": 6.9617166388844e-05,
+ "loss": 0.7545,
+ "step": 1793
+ },
+ {
+ "epoch": 1.794504704448126,
+ "grad_norm": 0.47650712728500366,
+ "learning_rate": 6.951731391410468e-05,
+ "loss": 0.8237,
+ "step": 1794
+ },
+ {
+ "epoch": 1.7955049857772498,
+ "grad_norm": 0.5793735384941101,
+ "learning_rate": 6.94174949343246e-05,
+ "loss": 1.1272,
+ "step": 1795
+ },
+ {
+ "epoch": 1.7965052671063737,
+ "grad_norm": 0.4923813045024872,
+ "learning_rate": 6.931770955918674e-05,
+ "loss": 1.0535,
+ "step": 1796
+ },
+ {
+ "epoch": 1.7975055484354976,
+ "grad_norm": 0.515476405620575,
+ "learning_rate": 6.921795789833723e-05,
+ "loss": 0.986,
+ "step": 1797
+ },
+ {
+ "epoch": 1.7985058297646213,
+ "grad_norm": 0.4697955250740051,
+ "learning_rate": 6.911824006138503e-05,
+ "loss": 0.8236,
+ "step": 1798
+ },
+ {
+ "epoch": 1.7995061110937451,
+ "grad_norm": 0.48255470395088196,
+ "learning_rate": 6.901855615790206e-05,
+ "loss": 0.8308,
+ "step": 1799
+ },
+ {
+ "epoch": 1.8005063924228688,
+ "grad_norm": 0.5010727047920227,
+ "learning_rate": 6.891890629742288e-05,
+ "loss": 0.9338,
+ "step": 1800
+ },
+ {
+ "epoch": 1.8015066737519927,
+ "grad_norm": 0.5230937004089355,
+ "learning_rate": 6.88192905894447e-05,
+ "loss": 0.7253,
+ "step": 1801
+ },
+ {
+ "epoch": 1.8025069550811166,
+ "grad_norm": 0.4493248164653778,
+ "learning_rate": 6.871970914342712e-05,
+ "loss": 0.9527,
+ "step": 1802
+ },
+ {
+ "epoch": 1.8035072364102405,
+ "grad_norm": 0.4727008044719696,
+ "learning_rate": 6.862016206879216e-05,
+ "loss": 0.9527,
+ "step": 1803
+ },
+ {
+ "epoch": 1.8045075177393644,
+ "grad_norm": 0.457698255777359,
+ "learning_rate": 6.852064947492405e-05,
+ "loss": 0.8424,
+ "step": 1804
+ },
+ {
+ "epoch": 1.805507799068488,
+ "grad_norm": 0.6156003475189209,
+ "learning_rate": 6.842117147116913e-05,
+ "loss": 0.9988,
+ "step": 1805
+ },
+ {
+ "epoch": 1.8065080803976117,
+ "grad_norm": 0.5174852609634399,
+ "learning_rate": 6.832172816683575e-05,
+ "loss": 0.8635,
+ "step": 1806
+ },
+ {
+ "epoch": 1.8075083617267356,
+ "grad_norm": 0.5165886878967285,
+ "learning_rate": 6.82223196711941e-05,
+ "loss": 0.8721,
+ "step": 1807
+ },
+ {
+ "epoch": 1.8085086430558595,
+ "grad_norm": 0.4866868853569031,
+ "learning_rate": 6.812294609347615e-05,
+ "loss": 0.8819,
+ "step": 1808
+ },
+ {
+ "epoch": 1.8095089243849833,
+ "grad_norm": 0.4991300404071808,
+ "learning_rate": 6.802360754287547e-05,
+ "loss": 0.8644,
+ "step": 1809
+ },
+ {
+ "epoch": 1.8105092057141072,
+ "grad_norm": 0.501853346824646,
+ "learning_rate": 6.79243041285472e-05,
+ "loss": 0.824,
+ "step": 1810
+ },
+ {
+ "epoch": 1.811509487043231,
+ "grad_norm": 0.5272979140281677,
+ "learning_rate": 6.782503595960782e-05,
+ "loss": 1.0178,
+ "step": 1811
+ },
+ {
+ "epoch": 1.8125097683723548,
+ "grad_norm": 0.5986105799674988,
+ "learning_rate": 6.772580314513508e-05,
+ "loss": 0.949,
+ "step": 1812
+ },
+ {
+ "epoch": 1.8135100497014784,
+ "grad_norm": 0.5391054153442383,
+ "learning_rate": 6.762660579416791e-05,
+ "loss": 1.0698,
+ "step": 1813
+ },
+ {
+ "epoch": 1.8145103310306023,
+ "grad_norm": 0.48486262559890747,
+ "learning_rate": 6.752744401570625e-05,
+ "loss": 0.9986,
+ "step": 1814
+ },
+ {
+ "epoch": 1.8155106123597262,
+ "grad_norm": 0.5090842843055725,
+ "learning_rate": 6.742831791871096e-05,
+ "loss": 0.8459,
+ "step": 1815
+ },
+ {
+ "epoch": 1.81651089368885,
+ "grad_norm": 0.408403605222702,
+ "learning_rate": 6.732922761210369e-05,
+ "loss": 0.7093,
+ "step": 1816
+ },
+ {
+ "epoch": 1.8175111750179738,
+ "grad_norm": 0.5082786083221436,
+ "learning_rate": 6.723017320476679e-05,
+ "loss": 0.8289,
+ "step": 1817
+ },
+ {
+ "epoch": 1.8185114563470977,
+ "grad_norm": 0.4834018647670746,
+ "learning_rate": 6.713115480554313e-05,
+ "loss": 0.9766,
+ "step": 1818
+ },
+ {
+ "epoch": 1.8195117376762213,
+ "grad_norm": 0.5373227596282959,
+ "learning_rate": 6.7032172523236e-05,
+ "loss": 1.0396,
+ "step": 1819
+ },
+ {
+ "epoch": 1.8205120190053452,
+ "grad_norm": 0.49561604857444763,
+ "learning_rate": 6.693322646660906e-05,
+ "loss": 0.9774,
+ "step": 1820
+ },
+ {
+ "epoch": 1.821512300334469,
+ "grad_norm": 0.47309985756874084,
+ "learning_rate": 6.683431674438612e-05,
+ "loss": 0.8266,
+ "step": 1821
+ },
+ {
+ "epoch": 1.822512581663593,
+ "grad_norm": 0.5706244707107544,
+ "learning_rate": 6.673544346525107e-05,
+ "loss": 1.027,
+ "step": 1822
+ },
+ {
+ "epoch": 1.8235128629927169,
+ "grad_norm": 0.5383077263832092,
+ "learning_rate": 6.663660673784777e-05,
+ "loss": 1.0545,
+ "step": 1823
+ },
+ {
+ "epoch": 1.8245131443218405,
+ "grad_norm": 0.5760438442230225,
+ "learning_rate": 6.653780667077985e-05,
+ "loss": 0.8955,
+ "step": 1824
+ },
+ {
+ "epoch": 1.8255134256509642,
+ "grad_norm": 0.45533323287963867,
+ "learning_rate": 6.643904337261082e-05,
+ "loss": 0.9149,
+ "step": 1825
+ },
+ {
+ "epoch": 1.826513706980088,
+ "grad_norm": 0.43935853242874146,
+ "learning_rate": 6.634031695186362e-05,
+ "loss": 0.8231,
+ "step": 1826
+ },
+ {
+ "epoch": 1.827513988309212,
+ "grad_norm": 0.4752298593521118,
+ "learning_rate": 6.624162751702076e-05,
+ "loss": 0.7823,
+ "step": 1827
+ },
+ {
+ "epoch": 1.8285142696383359,
+ "grad_norm": 0.5012879371643066,
+ "learning_rate": 6.614297517652409e-05,
+ "loss": 0.9586,
+ "step": 1828
+ },
+ {
+ "epoch": 1.8295145509674597,
+ "grad_norm": 0.4421415328979492,
+ "learning_rate": 6.604436003877464e-05,
+ "loss": 0.7812,
+ "step": 1829
+ },
+ {
+ "epoch": 1.8305148322965834,
+ "grad_norm": 0.5347071290016174,
+ "learning_rate": 6.594578221213265e-05,
+ "loss": 0.8906,
+ "step": 1830
+ },
+ {
+ "epoch": 1.831515113625707,
+ "grad_norm": 0.5177352428436279,
+ "learning_rate": 6.58472418049173e-05,
+ "loss": 1.0046,
+ "step": 1831
+ },
+ {
+ "epoch": 1.832515394954831,
+ "grad_norm": 0.5403003096580505,
+ "learning_rate": 6.574873892540671e-05,
+ "loss": 0.9588,
+ "step": 1832
+ },
+ {
+ "epoch": 1.8335156762839548,
+ "grad_norm": 0.5138882994651794,
+ "learning_rate": 6.565027368183769e-05,
+ "loss": 0.9824,
+ "step": 1833
+ },
+ {
+ "epoch": 1.8345159576130787,
+ "grad_norm": 0.4976009726524353,
+ "learning_rate": 6.555184618240577e-05,
+ "loss": 0.9454,
+ "step": 1834
+ },
+ {
+ "epoch": 1.8355162389422026,
+ "grad_norm": 0.5282961130142212,
+ "learning_rate": 6.545345653526495e-05,
+ "loss": 1.0134,
+ "step": 1835
+ },
+ {
+ "epoch": 1.8365165202713263,
+ "grad_norm": 0.47592097520828247,
+ "learning_rate": 6.535510484852767e-05,
+ "loss": 0.9658,
+ "step": 1836
+ },
+ {
+ "epoch": 1.8375168016004502,
+ "grad_norm": 0.5012205839157104,
+ "learning_rate": 6.525679123026463e-05,
+ "loss": 0.8937,
+ "step": 1837
+ },
+ {
+ "epoch": 1.8385170829295738,
+ "grad_norm": 0.47777363657951355,
+ "learning_rate": 6.515851578850474e-05,
+ "loss": 0.9862,
+ "step": 1838
+ },
+ {
+ "epoch": 1.8395173642586977,
+ "grad_norm": 0.4610724449157715,
+ "learning_rate": 6.506027863123492e-05,
+ "loss": 0.9208,
+ "step": 1839
+ },
+ {
+ "epoch": 1.8405176455878216,
+ "grad_norm": 0.5747025609016418,
+ "learning_rate": 6.496207986640004e-05,
+ "loss": 0.9366,
+ "step": 1840
+ },
+ {
+ "epoch": 1.8415179269169455,
+ "grad_norm": 0.48486635088920593,
+ "learning_rate": 6.48639196019028e-05,
+ "loss": 0.7989,
+ "step": 1841
+ },
+ {
+ "epoch": 1.8425182082460692,
+ "grad_norm": 0.4930958151817322,
+ "learning_rate": 6.476579794560356e-05,
+ "loss": 0.7846,
+ "step": 1842
+ },
+ {
+ "epoch": 1.843518489575193,
+ "grad_norm": 0.5363168120384216,
+ "learning_rate": 6.46677150053203e-05,
+ "loss": 0.9519,
+ "step": 1843
+ },
+ {
+ "epoch": 1.8445187709043167,
+ "grad_norm": 0.39670878648757935,
+ "learning_rate": 6.45696708888284e-05,
+ "loss": 0.8245,
+ "step": 1844
+ },
+ {
+ "epoch": 1.8455190522334406,
+ "grad_norm": 0.5151652693748474,
+ "learning_rate": 6.447166570386063e-05,
+ "loss": 0.9517,
+ "step": 1845
+ },
+ {
+ "epoch": 1.8465193335625645,
+ "grad_norm": 0.47063514590263367,
+ "learning_rate": 6.437369955810699e-05,
+ "loss": 0.8235,
+ "step": 1846
+ },
+ {
+ "epoch": 1.8475196148916884,
+ "grad_norm": 0.5120642185211182,
+ "learning_rate": 6.42757725592145e-05,
+ "loss": 1.1862,
+ "step": 1847
+ },
+ {
+ "epoch": 1.8485198962208123,
+ "grad_norm": 0.491312175989151,
+ "learning_rate": 6.417788481478728e-05,
+ "loss": 0.8375,
+ "step": 1848
+ },
+ {
+ "epoch": 1.849520177549936,
+ "grad_norm": 0.5049518346786499,
+ "learning_rate": 6.40800364323862e-05,
+ "loss": 0.8419,
+ "step": 1849
+ },
+ {
+ "epoch": 1.8505204588790596,
+ "grad_norm": 0.4442373514175415,
+ "learning_rate": 6.398222751952899e-05,
+ "loss": 0.8519,
+ "step": 1850
+ },
+ {
+ "epoch": 1.8515207402081835,
+ "grad_norm": 0.5219951868057251,
+ "learning_rate": 6.388445818368991e-05,
+ "loss": 0.8146,
+ "step": 1851
+ },
+ {
+ "epoch": 1.8525210215373074,
+ "grad_norm": 0.5035893321037292,
+ "learning_rate": 6.378672853229981e-05,
+ "loss": 0.908,
+ "step": 1852
+ },
+ {
+ "epoch": 1.8535213028664312,
+ "grad_norm": 0.4742502272129059,
+ "learning_rate": 6.368903867274585e-05,
+ "loss": 0.9503,
+ "step": 1853
+ },
+ {
+ "epoch": 1.8545215841955551,
+ "grad_norm": 0.504763126373291,
+ "learning_rate": 6.35913887123716e-05,
+ "loss": 0.8487,
+ "step": 1854
+ },
+ {
+ "epoch": 1.8555218655246788,
+ "grad_norm": 0.5125763416290283,
+ "learning_rate": 6.34937787584767e-05,
+ "loss": 0.9596,
+ "step": 1855
+ },
+ {
+ "epoch": 1.8565221468538027,
+ "grad_norm": 1.090164065361023,
+ "learning_rate": 6.339620891831678e-05,
+ "loss": 0.8088,
+ "step": 1856
+ },
+ {
+ "epoch": 1.8575224281829263,
+ "grad_norm": 0.4670305550098419,
+ "learning_rate": 6.329867929910347e-05,
+ "loss": 0.9748,
+ "step": 1857
+ },
+ {
+ "epoch": 1.8585227095120502,
+ "grad_norm": 0.49796226620674133,
+ "learning_rate": 6.32011900080042e-05,
+ "loss": 0.7566,
+ "step": 1858
+ },
+ {
+ "epoch": 1.8595229908411741,
+ "grad_norm": 0.5040385723114014,
+ "learning_rate": 6.310374115214204e-05,
+ "loss": 0.8959,
+ "step": 1859
+ },
+ {
+ "epoch": 1.860523272170298,
+ "grad_norm": 0.5290741920471191,
+ "learning_rate": 6.30063328385957e-05,
+ "loss": 0.9035,
+ "step": 1860
+ },
+ {
+ "epoch": 1.8615235534994217,
+ "grad_norm": 0.5860772728919983,
+ "learning_rate": 6.290896517439925e-05,
+ "loss": 1.0858,
+ "step": 1861
+ },
+ {
+ "epoch": 1.8625238348285456,
+ "grad_norm": 0.4714392423629761,
+ "learning_rate": 6.281163826654218e-05,
+ "loss": 0.9652,
+ "step": 1862
+ },
+ {
+ "epoch": 1.8635241161576692,
+ "grad_norm": 0.4995323717594147,
+ "learning_rate": 6.271435222196916e-05,
+ "loss": 0.9808,
+ "step": 1863
+ },
+ {
+ "epoch": 1.864524397486793,
+ "grad_norm": 0.5379069447517395,
+ "learning_rate": 6.261710714757994e-05,
+ "loss": 0.9893,
+ "step": 1864
+ },
+ {
+ "epoch": 1.865524678815917,
+ "grad_norm": 0.5350576043128967,
+ "learning_rate": 6.251990315022927e-05,
+ "loss": 1.1355,
+ "step": 1865
+ },
+ {
+ "epoch": 1.8665249601450409,
+ "grad_norm": 0.5047613382339478,
+ "learning_rate": 6.24227403367268e-05,
+ "loss": 1.0258,
+ "step": 1866
+ },
+ {
+ "epoch": 1.8675252414741648,
+ "grad_norm": 0.5068250894546509,
+ "learning_rate": 6.232561881383687e-05,
+ "loss": 1.0832,
+ "step": 1867
+ },
+ {
+ "epoch": 1.8685255228032884,
+ "grad_norm": 0.5315554738044739,
+ "learning_rate": 6.222853868827839e-05,
+ "loss": 0.8883,
+ "step": 1868
+ },
+ {
+ "epoch": 1.869525804132412,
+ "grad_norm": 0.47088900208473206,
+ "learning_rate": 6.213150006672499e-05,
+ "loss": 1.0515,
+ "step": 1869
+ },
+ {
+ "epoch": 1.870526085461536,
+ "grad_norm": 0.450911283493042,
+ "learning_rate": 6.20345030558045e-05,
+ "loss": 0.8933,
+ "step": 1870
+ },
+ {
+ "epoch": 1.8715263667906599,
+ "grad_norm": 0.5612505674362183,
+ "learning_rate": 6.193754776209911e-05,
+ "loss": 0.8822,
+ "step": 1871
+ },
+ {
+ "epoch": 1.8725266481197838,
+ "grad_norm": 0.6027489900588989,
+ "learning_rate": 6.184063429214515e-05,
+ "loss": 0.849,
+ "step": 1872
+ },
+ {
+ "epoch": 1.8735269294489076,
+ "grad_norm": 0.5527409315109253,
+ "learning_rate": 6.174376275243299e-05,
+ "loss": 0.9841,
+ "step": 1873
+ },
+ {
+ "epoch": 1.8745272107780313,
+ "grad_norm": 0.4340353310108185,
+ "learning_rate": 6.164693324940694e-05,
+ "loss": 0.7171,
+ "step": 1874
+ },
+ {
+ "epoch": 1.8755274921071552,
+ "grad_norm": 0.46394628286361694,
+ "learning_rate": 6.15501458894651e-05,
+ "loss": 0.8439,
+ "step": 1875
+ },
+ {
+ "epoch": 1.8765277734362789,
+ "grad_norm": 0.46649280190467834,
+ "learning_rate": 6.145340077895929e-05,
+ "loss": 0.9665,
+ "step": 1876
+ },
+ {
+ "epoch": 1.8775280547654027,
+ "grad_norm": 0.45841577649116516,
+ "learning_rate": 6.135669802419488e-05,
+ "loss": 0.8537,
+ "step": 1877
+ },
+ {
+ "epoch": 1.8785283360945266,
+ "grad_norm": 0.46763482689857483,
+ "learning_rate": 6.126003773143072e-05,
+ "loss": 0.8314,
+ "step": 1878
+ },
+ {
+ "epoch": 1.8795286174236505,
+ "grad_norm": 0.47747811675071716,
+ "learning_rate": 6.116342000687896e-05,
+ "loss": 0.9612,
+ "step": 1879
+ },
+ {
+ "epoch": 1.8805288987527742,
+ "grad_norm": 0.5145304203033447,
+ "learning_rate": 6.106684495670506e-05,
+ "loss": 0.9628,
+ "step": 1880
+ },
+ {
+ "epoch": 1.881529180081898,
+ "grad_norm": 0.4443700313568115,
+ "learning_rate": 6.097031268702746e-05,
+ "loss": 0.7482,
+ "step": 1881
+ },
+ {
+ "epoch": 1.8825294614110217,
+ "grad_norm": 0.4143758714199066,
+ "learning_rate": 6.087382330391774e-05,
+ "loss": 0.6993,
+ "step": 1882
+ },
+ {
+ "epoch": 1.8835297427401456,
+ "grad_norm": 0.5006669759750366,
+ "learning_rate": 6.077737691340023e-05,
+ "loss": 0.8514,
+ "step": 1883
+ },
+ {
+ "epoch": 1.8845300240692695,
+ "grad_norm": 0.48067471385002136,
+ "learning_rate": 6.0680973621452105e-05,
+ "loss": 0.8319,
+ "step": 1884
+ },
+ {
+ "epoch": 1.8855303053983934,
+ "grad_norm": 0.47147560119628906,
+ "learning_rate": 6.0584613534003144e-05,
+ "loss": 0.9822,
+ "step": 1885
+ },
+ {
+ "epoch": 1.8865305867275173,
+ "grad_norm": 0.48229023814201355,
+ "learning_rate": 6.0488296756935636e-05,
+ "loss": 0.8972,
+ "step": 1886
+ },
+ {
+ "epoch": 1.887530868056641,
+ "grad_norm": 0.487932026386261,
+ "learning_rate": 6.039202339608432e-05,
+ "loss": 0.8976,
+ "step": 1887
+ },
+ {
+ "epoch": 1.8885311493857646,
+ "grad_norm": 0.49332642555236816,
+ "learning_rate": 6.0295793557236203e-05,
+ "loss": 0.8734,
+ "step": 1888
+ },
+ {
+ "epoch": 1.8895314307148885,
+ "grad_norm": 0.4834604263305664,
+ "learning_rate": 6.019960734613047e-05,
+ "loss": 0.8414,
+ "step": 1889
+ },
+ {
+ "epoch": 1.8905317120440124,
+ "grad_norm": 0.46540340781211853,
+ "learning_rate": 6.010346486845837e-05,
+ "loss": 0.848,
+ "step": 1890
+ },
+ {
+ "epoch": 1.8915319933731363,
+ "grad_norm": 0.4350258409976959,
+ "learning_rate": 6.0007366229863117e-05,
+ "loss": 0.8143,
+ "step": 1891
+ },
+ {
+ "epoch": 1.8925322747022602,
+ "grad_norm": 0.4675842821598053,
+ "learning_rate": 5.991131153593971e-05,
+ "loss": 0.8136,
+ "step": 1892
+ },
+ {
+ "epoch": 1.8935325560313838,
+ "grad_norm": 0.560526430606842,
+ "learning_rate": 5.981530089223489e-05,
+ "loss": 1.097,
+ "step": 1893
+ },
+ {
+ "epoch": 1.8945328373605075,
+ "grad_norm": 0.48588961362838745,
+ "learning_rate": 5.971933440424703e-05,
+ "loss": 0.8046,
+ "step": 1894
+ },
+ {
+ "epoch": 1.8955331186896314,
+ "grad_norm": 0.47677376866340637,
+ "learning_rate": 5.9623412177425886e-05,
+ "loss": 0.8202,
+ "step": 1895
+ },
+ {
+ "epoch": 1.8965334000187553,
+ "grad_norm": 0.49400967359542847,
+ "learning_rate": 5.952753431717268e-05,
+ "loss": 0.8114,
+ "step": 1896
+ },
+ {
+ "epoch": 1.8975336813478791,
+ "grad_norm": 0.4729720652103424,
+ "learning_rate": 5.9431700928839805e-05,
+ "loss": 0.7848,
+ "step": 1897
+ },
+ {
+ "epoch": 1.898533962677003,
+ "grad_norm": 0.4910169541835785,
+ "learning_rate": 5.933591211773082e-05,
+ "loss": 0.8424,
+ "step": 1898
+ },
+ {
+ "epoch": 1.8995342440061267,
+ "grad_norm": 0.4618901014328003,
+ "learning_rate": 5.924016798910037e-05,
+ "loss": 0.8423,
+ "step": 1899
+ },
+ {
+ "epoch": 1.9005345253352506,
+ "grad_norm": 0.562754213809967,
+ "learning_rate": 5.914446864815388e-05,
+ "loss": 0.8016,
+ "step": 1900
+ },
+ {
+ "epoch": 1.9015348066643742,
+ "grad_norm": 0.479568749666214,
+ "learning_rate": 5.9048814200047675e-05,
+ "loss": 0.9471,
+ "step": 1901
+ },
+ {
+ "epoch": 1.9025350879934981,
+ "grad_norm": 0.5435795187950134,
+ "learning_rate": 5.895320474988864e-05,
+ "loss": 0.94,
+ "step": 1902
+ },
+ {
+ "epoch": 1.903535369322622,
+ "grad_norm": 0.711804211139679,
+ "learning_rate": 5.885764040273426e-05,
+ "loss": 0.9192,
+ "step": 1903
+ },
+ {
+ "epoch": 1.904535650651746,
+ "grad_norm": 0.49941113591194153,
+ "learning_rate": 5.876212126359251e-05,
+ "loss": 0.8541,
+ "step": 1904
+ },
+ {
+ "epoch": 1.9055359319808696,
+ "grad_norm": 0.4437618553638458,
+ "learning_rate": 5.866664743742162e-05,
+ "loss": 0.935,
+ "step": 1905
+ },
+ {
+ "epoch": 1.9065362133099935,
+ "grad_norm": 0.4949079751968384,
+ "learning_rate": 5.857121902913008e-05,
+ "loss": 0.965,
+ "step": 1906
+ },
+ {
+ "epoch": 1.9075364946391171,
+ "grad_norm": 0.5047593712806702,
+ "learning_rate": 5.8475836143576433e-05,
+ "loss": 0.9078,
+ "step": 1907
+ },
+ {
+ "epoch": 1.908536775968241,
+ "grad_norm": 0.4645143449306488,
+ "learning_rate": 5.838049888556925e-05,
+ "loss": 0.7848,
+ "step": 1908
+ },
+ {
+ "epoch": 1.909537057297365,
+ "grad_norm": 0.45980706810951233,
+ "learning_rate": 5.8285207359866936e-05,
+ "loss": 0.8297,
+ "step": 1909
+ },
+ {
+ "epoch": 1.9105373386264888,
+ "grad_norm": 0.566573441028595,
+ "learning_rate": 5.8189961671177574e-05,
+ "loss": 1.0823,
+ "step": 1910
+ },
+ {
+ "epoch": 1.9115376199556127,
+ "grad_norm": 0.46022629737854004,
+ "learning_rate": 5.809476192415905e-05,
+ "loss": 0.8634,
+ "step": 1911
+ },
+ {
+ "epoch": 1.9125379012847363,
+ "grad_norm": 0.493632048368454,
+ "learning_rate": 5.7999608223418534e-05,
+ "loss": 0.7264,
+ "step": 1912
+ },
+ {
+ "epoch": 1.91353818261386,
+ "grad_norm": 0.4561927616596222,
+ "learning_rate": 5.790450067351291e-05,
+ "loss": 0.8736,
+ "step": 1913
+ },
+ {
+ "epoch": 1.9145384639429839,
+ "grad_norm": 0.5217312574386597,
+ "learning_rate": 5.780943937894805e-05,
+ "loss": 0.9918,
+ "step": 1914
+ },
+ {
+ "epoch": 1.9155387452721078,
+ "grad_norm": 0.500164806842804,
+ "learning_rate": 5.771442444417918e-05,
+ "loss": 0.784,
+ "step": 1915
+ },
+ {
+ "epoch": 1.9165390266012317,
+ "grad_norm": 0.4723392724990845,
+ "learning_rate": 5.761945597361054e-05,
+ "loss": 0.9225,
+ "step": 1916
+ },
+ {
+ "epoch": 1.9175393079303555,
+ "grad_norm": 0.4722166359424591,
+ "learning_rate": 5.752453407159522e-05,
+ "loss": 0.8516,
+ "step": 1917
+ },
+ {
+ "epoch": 1.9185395892594792,
+ "grad_norm": 0.4163341522216797,
+ "learning_rate": 5.742965884243532e-05,
+ "loss": 0.7709,
+ "step": 1918
+ },
+ {
+ "epoch": 1.919539870588603,
+ "grad_norm": 0.5236088037490845,
+ "learning_rate": 5.733483039038149e-05,
+ "loss": 0.9662,
+ "step": 1919
+ },
+ {
+ "epoch": 1.9205401519177268,
+ "grad_norm": 0.5264710783958435,
+ "learning_rate": 5.724004881963311e-05,
+ "loss": 0.8918,
+ "step": 1920
+ },
+ {
+ "epoch": 1.9215404332468506,
+ "grad_norm": 0.43993479013442993,
+ "learning_rate": 5.714531423433791e-05,
+ "loss": 0.9233,
+ "step": 1921
+ },
+ {
+ "epoch": 1.9225407145759745,
+ "grad_norm": 0.4552697241306305,
+ "learning_rate": 5.705062673859216e-05,
+ "loss": 0.8593,
+ "step": 1922
+ },
+ {
+ "epoch": 1.9235409959050984,
+ "grad_norm": 0.5186688899993896,
+ "learning_rate": 5.69559864364402e-05,
+ "loss": 0.906,
+ "step": 1923
+ },
+ {
+ "epoch": 1.924541277234222,
+ "grad_norm": 0.5140933990478516,
+ "learning_rate": 5.6861393431874675e-05,
+ "loss": 1.0488,
+ "step": 1924
+ },
+ {
+ "epoch": 1.925541558563346,
+ "grad_norm": 0.4874193072319031,
+ "learning_rate": 5.676684782883615e-05,
+ "loss": 0.8875,
+ "step": 1925
+ },
+ {
+ "epoch": 1.9265418398924696,
+ "grad_norm": 0.5220529437065125,
+ "learning_rate": 5.667234973121317e-05,
+ "loss": 0.8561,
+ "step": 1926
+ },
+ {
+ "epoch": 1.9275421212215935,
+ "grad_norm": 0.43269822001457214,
+ "learning_rate": 5.6577899242842025e-05,
+ "loss": 0.9039,
+ "step": 1927
+ },
+ {
+ "epoch": 1.9285424025507174,
+ "grad_norm": 0.5126697421073914,
+ "learning_rate": 5.648349646750673e-05,
+ "loss": 0.941,
+ "step": 1928
+ },
+ {
+ "epoch": 1.9295426838798413,
+ "grad_norm": 0.5042800307273865,
+ "learning_rate": 5.6389141508938903e-05,
+ "loss": 0.9901,
+ "step": 1929
+ },
+ {
+ "epoch": 1.9305429652089652,
+ "grad_norm": 0.5381462574005127,
+ "learning_rate": 5.629483447081751e-05,
+ "loss": 0.9661,
+ "step": 1930
+ },
+ {
+ "epoch": 1.9315432465380888,
+ "grad_norm": 0.5455595254898071,
+ "learning_rate": 5.620057545676901e-05,
+ "loss": 0.8618,
+ "step": 1931
+ },
+ {
+ "epoch": 1.9325435278672125,
+ "grad_norm": 0.45534226298332214,
+ "learning_rate": 5.610636457036693e-05,
+ "loss": 0.827,
+ "step": 1932
+ },
+ {
+ "epoch": 1.9335438091963364,
+ "grad_norm": 0.4841485917568207,
+ "learning_rate": 5.601220191513208e-05,
+ "loss": 0.8979,
+ "step": 1933
+ },
+ {
+ "epoch": 1.9345440905254603,
+ "grad_norm": 0.4711385667324066,
+ "learning_rate": 5.591808759453214e-05,
+ "loss": 0.9841,
+ "step": 1934
+ },
+ {
+ "epoch": 1.9355443718545842,
+ "grad_norm": 0.44583311676979065,
+ "learning_rate": 5.5824021711981686e-05,
+ "loss": 0.7455,
+ "step": 1935
+ },
+ {
+ "epoch": 1.936544653183708,
+ "grad_norm": 0.5186678171157837,
+ "learning_rate": 5.573000437084221e-05,
+ "loss": 0.8881,
+ "step": 1936
+ },
+ {
+ "epoch": 1.9375449345128317,
+ "grad_norm": 0.5111430287361145,
+ "learning_rate": 5.563603567442168e-05,
+ "loss": 0.97,
+ "step": 1937
+ },
+ {
+ "epoch": 1.9385452158419556,
+ "grad_norm": 0.5185840129852295,
+ "learning_rate": 5.554211572597477e-05,
+ "loss": 0.9864,
+ "step": 1938
+ },
+ {
+ "epoch": 1.9395454971710793,
+ "grad_norm": 0.590919554233551,
+ "learning_rate": 5.544824462870244e-05,
+ "loss": 0.9917,
+ "step": 1939
+ },
+ {
+ "epoch": 1.9405457785002032,
+ "grad_norm": 0.5174764394760132,
+ "learning_rate": 5.5354422485752125e-05,
+ "loss": 0.91,
+ "step": 1940
+ },
+ {
+ "epoch": 1.941546059829327,
+ "grad_norm": 0.4679591953754425,
+ "learning_rate": 5.5260649400217326e-05,
+ "loss": 0.8743,
+ "step": 1941
+ },
+ {
+ "epoch": 1.942546341158451,
+ "grad_norm": 0.5026495456695557,
+ "learning_rate": 5.5166925475137735e-05,
+ "loss": 0.9652,
+ "step": 1942
+ },
+ {
+ "epoch": 1.9435466224875746,
+ "grad_norm": 0.48180490732192993,
+ "learning_rate": 5.507325081349903e-05,
+ "loss": 0.9213,
+ "step": 1943
+ },
+ {
+ "epoch": 1.9445469038166985,
+ "grad_norm": 0.467143714427948,
+ "learning_rate": 5.497962551823266e-05,
+ "loss": 0.901,
+ "step": 1944
+ },
+ {
+ "epoch": 1.9455471851458221,
+ "grad_norm": 0.4535980820655823,
+ "learning_rate": 5.488604969221597e-05,
+ "loss": 0.8284,
+ "step": 1945
+ },
+ {
+ "epoch": 1.946547466474946,
+ "grad_norm": 0.5203812122344971,
+ "learning_rate": 5.479252343827178e-05,
+ "loss": 0.8001,
+ "step": 1946
+ },
+ {
+ "epoch": 1.94754774780407,
+ "grad_norm": 0.4892285168170929,
+ "learning_rate": 5.469904685916861e-05,
+ "loss": 0.7415,
+ "step": 1947
+ },
+ {
+ "epoch": 1.9485480291331938,
+ "grad_norm": 0.5130967497825623,
+ "learning_rate": 5.460562005762024e-05,
+ "loss": 0.9661,
+ "step": 1948
+ },
+ {
+ "epoch": 1.9495483104623177,
+ "grad_norm": 0.47101548314094543,
+ "learning_rate": 5.4512243136285915e-05,
+ "loss": 0.85,
+ "step": 1949
+ },
+ {
+ "epoch": 1.9505485917914414,
+ "grad_norm": 0.4335457384586334,
+ "learning_rate": 5.441891619776987e-05,
+ "loss": 0.8406,
+ "step": 1950
+ },
+ {
+ "epoch": 1.951548873120565,
+ "grad_norm": 0.45771148800849915,
+ "learning_rate": 5.432563934462166e-05,
+ "loss": 0.9252,
+ "step": 1951
+ },
+ {
+ "epoch": 1.952549154449689,
+ "grad_norm": 0.5619480013847351,
+ "learning_rate": 5.423241267933557e-05,
+ "loss": 0.844,
+ "step": 1952
+ },
+ {
+ "epoch": 1.9535494357788128,
+ "grad_norm": 0.4204142391681671,
+ "learning_rate": 5.4139236304350935e-05,
+ "loss": 0.8829,
+ "step": 1953
+ },
+ {
+ "epoch": 1.9545497171079367,
+ "grad_norm": 0.4862264394760132,
+ "learning_rate": 5.404611032205169e-05,
+ "loss": 0.9882,
+ "step": 1954
+ },
+ {
+ "epoch": 1.9555499984370606,
+ "grad_norm": 0.46490079164505005,
+ "learning_rate": 5.3953034834766416e-05,
+ "loss": 0.978,
+ "step": 1955
+ },
+ {
+ "epoch": 1.9565502797661842,
+ "grad_norm": 0.5944529175758362,
+ "learning_rate": 5.386000994476832e-05,
+ "loss": 0.8706,
+ "step": 1956
+ },
+ {
+ "epoch": 1.957550561095308,
+ "grad_norm": 0.5310636162757874,
+ "learning_rate": 5.376703575427481e-05,
+ "loss": 0.9472,
+ "step": 1957
+ },
+ {
+ "epoch": 1.9585508424244318,
+ "grad_norm": 0.49689510464668274,
+ "learning_rate": 5.367411236544786e-05,
+ "loss": 1.0081,
+ "step": 1958
+ },
+ {
+ "epoch": 1.9595511237535557,
+ "grad_norm": 0.5658974647521973,
+ "learning_rate": 5.3581239880393375e-05,
+ "loss": 1.0419,
+ "step": 1959
+ },
+ {
+ "epoch": 1.9605514050826796,
+ "grad_norm": 0.4068913757801056,
+ "learning_rate": 5.3488418401161475e-05,
+ "loss": 0.7635,
+ "step": 1960
+ },
+ {
+ "epoch": 1.9615516864118034,
+ "grad_norm": 0.6318438053131104,
+ "learning_rate": 5.339564802974615e-05,
+ "loss": 0.8508,
+ "step": 1961
+ },
+ {
+ "epoch": 1.962551967740927,
+ "grad_norm": 0.5346115827560425,
+ "learning_rate": 5.33029288680852e-05,
+ "loss": 0.8885,
+ "step": 1962
+ },
+ {
+ "epoch": 1.963552249070051,
+ "grad_norm": 0.4659571051597595,
+ "learning_rate": 5.321026101806032e-05,
+ "loss": 0.8957,
+ "step": 1963
+ },
+ {
+ "epoch": 1.9645525303991747,
+ "grad_norm": 0.502803385257721,
+ "learning_rate": 5.311764458149664e-05,
+ "loss": 0.8807,
+ "step": 1964
+ },
+ {
+ "epoch": 1.9655528117282985,
+ "grad_norm": 0.482771098613739,
+ "learning_rate": 5.302507966016295e-05,
+ "loss": 0.9404,
+ "step": 1965
+ },
+ {
+ "epoch": 1.9665530930574224,
+ "grad_norm": 0.48509371280670166,
+ "learning_rate": 5.293256635577126e-05,
+ "loss": 0.8903,
+ "step": 1966
+ },
+ {
+ "epoch": 1.9675533743865463,
+ "grad_norm": 0.5044885873794556,
+ "learning_rate": 5.284010476997705e-05,
+ "loss": 0.8193,
+ "step": 1967
+ },
+ {
+ "epoch": 1.96855365571567,
+ "grad_norm": 0.5143056511878967,
+ "learning_rate": 5.274769500437882e-05,
+ "loss": 0.9903,
+ "step": 1968
+ },
+ {
+ "epoch": 1.9695539370447939,
+ "grad_norm": 0.4803191125392914,
+ "learning_rate": 5.265533716051825e-05,
+ "loss": 0.815,
+ "step": 1969
+ },
+ {
+ "epoch": 1.9705542183739175,
+ "grad_norm": 0.4977998435497284,
+ "learning_rate": 5.256303133987982e-05,
+ "loss": 0.8749,
+ "step": 1970
+ },
+ {
+ "epoch": 1.9715544997030414,
+ "grad_norm": 0.4953812062740326,
+ "learning_rate": 5.247077764389099e-05,
+ "loss": 0.8228,
+ "step": 1971
+ },
+ {
+ "epoch": 1.9725547810321653,
+ "grad_norm": 0.4795776903629303,
+ "learning_rate": 5.2378576173921934e-05,
+ "loss": 0.8692,
+ "step": 1972
+ },
+ {
+ "epoch": 1.9735550623612892,
+ "grad_norm": 0.6318855881690979,
+ "learning_rate": 5.22864270312853e-05,
+ "loss": 1.0387,
+ "step": 1973
+ },
+ {
+ "epoch": 1.974555343690413,
+ "grad_norm": 0.4658355116844177,
+ "learning_rate": 5.219433031723641e-05,
+ "loss": 0.7585,
+ "step": 1974
+ },
+ {
+ "epoch": 1.9755556250195367,
+ "grad_norm": 0.41826239228248596,
+ "learning_rate": 5.210228613297281e-05,
+ "loss": 0.7991,
+ "step": 1975
+ },
+ {
+ "epoch": 1.9765559063486604,
+ "grad_norm": 0.4662337005138397,
+ "learning_rate": 5.201029457963451e-05,
+ "loss": 0.9127,
+ "step": 1976
+ },
+ {
+ "epoch": 1.9775561876777843,
+ "grad_norm": 0.4976811408996582,
+ "learning_rate": 5.191835575830352e-05,
+ "loss": 1.104,
+ "step": 1977
+ },
+ {
+ "epoch": 1.9785564690069082,
+ "grad_norm": 0.5814425945281982,
+ "learning_rate": 5.1826469770004026e-05,
+ "loss": 0.9479,
+ "step": 1978
+ },
+ {
+ "epoch": 1.979556750336032,
+ "grad_norm": 0.5100698471069336,
+ "learning_rate": 5.1734636715702043e-05,
+ "loss": 0.8778,
+ "step": 1979
+ },
+ {
+ "epoch": 1.980557031665156,
+ "grad_norm": 0.5200473070144653,
+ "learning_rate": 5.1642856696305575e-05,
+ "loss": 0.9684,
+ "step": 1980
+ },
+ {
+ "epoch": 1.9815573129942796,
+ "grad_norm": 0.728875458240509,
+ "learning_rate": 5.155112981266422e-05,
+ "loss": 1.1052,
+ "step": 1981
+ },
+ {
+ "epoch": 1.9825575943234035,
+ "grad_norm": 0.504478931427002,
+ "learning_rate": 5.145945616556921e-05,
+ "loss": 0.9089,
+ "step": 1982
+ },
+ {
+ "epoch": 1.9835578756525272,
+ "grad_norm": 0.48226889967918396,
+ "learning_rate": 5.136783585575336e-05,
+ "loss": 0.8765,
+ "step": 1983
+ },
+ {
+ "epoch": 1.984558156981651,
+ "grad_norm": 0.47124215960502625,
+ "learning_rate": 5.127626898389075e-05,
+ "loss": 0.8909,
+ "step": 1984
+ },
+ {
+ "epoch": 1.985558438310775,
+ "grad_norm": 0.4892251491546631,
+ "learning_rate": 5.118475565059691e-05,
+ "loss": 0.7596,
+ "step": 1985
+ },
+ {
+ "epoch": 1.9865587196398988,
+ "grad_norm": 0.550346851348877,
+ "learning_rate": 5.109329595642829e-05,
+ "loss": 1.0297,
+ "step": 1986
+ },
+ {
+ "epoch": 1.9875590009690225,
+ "grad_norm": 0.5535497069358826,
+ "learning_rate": 5.1001890001882734e-05,
+ "loss": 0.8995,
+ "step": 1987
+ },
+ {
+ "epoch": 1.9885592822981464,
+ "grad_norm": 0.4945215582847595,
+ "learning_rate": 5.091053788739878e-05,
+ "loss": 0.8223,
+ "step": 1988
+ },
+ {
+ "epoch": 1.98955956362727,
+ "grad_norm": 0.46689140796661377,
+ "learning_rate": 5.081923971335582e-05,
+ "loss": 0.7746,
+ "step": 1989
+ },
+ {
+ "epoch": 1.990559844956394,
+ "grad_norm": 0.46213075518608093,
+ "learning_rate": 5.072799558007415e-05,
+ "loss": 0.9981,
+ "step": 1990
+ },
+ {
+ "epoch": 1.9915601262855178,
+ "grad_norm": 0.4265044629573822,
+ "learning_rate": 5.063680558781445e-05,
+ "loss": 0.7414,
+ "step": 1991
+ },
+ {
+ "epoch": 1.9925604076146417,
+ "grad_norm": 0.47252804040908813,
+ "learning_rate": 5.0545669836778144e-05,
+ "loss": 0.9779,
+ "step": 1992
+ },
+ {
+ "epoch": 1.9935606889437656,
+ "grad_norm": 0.49390360713005066,
+ "learning_rate": 5.045458842710684e-05,
+ "loss": 1.047,
+ "step": 1993
+ },
+ {
+ "epoch": 1.9945609702728893,
+ "grad_norm": 0.48533156514167786,
+ "learning_rate": 5.036356145888263e-05,
+ "loss": 0.784,
+ "step": 1994
+ },
+ {
+ "epoch": 1.995561251602013,
+ "grad_norm": 0.4855436086654663,
+ "learning_rate": 5.0272589032127594e-05,
+ "loss": 1.0186,
+ "step": 1995
+ },
+ {
+ "epoch": 1.9965615329311368,
+ "grad_norm": 0.48796966671943665,
+ "learning_rate": 5.0181671246804064e-05,
+ "loss": 0.931,
+ "step": 1996
+ },
+ {
+ "epoch": 1.9975618142602607,
+ "grad_norm": 0.476491242647171,
+ "learning_rate": 5.009080820281415e-05,
+ "loss": 0.7653,
+ "step": 1997
+ },
+ {
+ "epoch": 1.9985620955893846,
+ "grad_norm": 0.48085761070251465,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 0.7846,
+ "step": 1998
+ },
+ {
+ "epoch": 1.9995623769185085,
+ "grad_norm": 0.454314261674881,
+ "learning_rate": 4.990924673814336e-05,
+ "loss": 0.8582,
+ "step": 1999
+ },
+ {
+ "epoch": 2.0005626582476324,
+ "grad_norm": 0.3911774158477783,
+ "learning_rate": 4.981854851696568e-05,
+ "loss": 0.5936,
+ "step": 2000
+ },
+ {
+ "epoch": 2.001562939576756,
+ "grad_norm": 0.44177675247192383,
+ "learning_rate": 4.972790543612783e-05,
+ "loss": 0.7308,
+ "step": 2001
+ },
+ {
+ "epoch": 2.0025632209058797,
+ "grad_norm": 0.43666279315948486,
+ "learning_rate": 4.963731759523022e-05,
+ "loss": 0.7415,
+ "step": 2002
+ },
+ {
+ "epoch": 2.0035635022350036,
+ "grad_norm": 0.4072078466415405,
+ "learning_rate": 4.954678509381253e-05,
+ "loss": 0.5694,
+ "step": 2003
+ },
+ {
+ "epoch": 2.0045637835641275,
+ "grad_norm": 0.4740023612976074,
+ "learning_rate": 4.945630803135354e-05,
+ "loss": 0.608,
+ "step": 2004
+ },
+ {
+ "epoch": 2.0055640648932513,
+ "grad_norm": 0.37969714403152466,
+ "learning_rate": 4.9365886507271243e-05,
+ "loss": 0.5037,
+ "step": 2005
+ },
+ {
+ "epoch": 2.0065643462223752,
+ "grad_norm": 0.4192529618740082,
+ "learning_rate": 4.9275520620922477e-05,
+ "loss": 0.7473,
+ "step": 2006
+ },
+ {
+ "epoch": 2.0075646275514987,
+ "grad_norm": 0.42636537551879883,
+ "learning_rate": 4.918521047160308e-05,
+ "loss": 0.5791,
+ "step": 2007
+ },
+ {
+ "epoch": 2.0085649088806226,
+ "grad_norm": 0.44065889716148376,
+ "learning_rate": 4.9094956158547535e-05,
+ "loss": 0.571,
+ "step": 2008
+ },
+ {
+ "epoch": 2.0095651902097464,
+ "grad_norm": 0.4889744520187378,
+ "learning_rate": 4.900475778092897e-05,
+ "loss": 0.6856,
+ "step": 2009
+ },
+ {
+ "epoch": 2.0105654715388703,
+ "grad_norm": 0.4938597083091736,
+ "learning_rate": 4.891461543785917e-05,
+ "loss": 0.7002,
+ "step": 2010
+ },
+ {
+ "epoch": 2.011565752867994,
+ "grad_norm": 0.45282644033432007,
+ "learning_rate": 4.882452922838818e-05,
+ "loss": 0.6107,
+ "step": 2011
+ },
+ {
+ "epoch": 2.012566034197118,
+ "grad_norm": 0.3883883059024811,
+ "learning_rate": 4.873449925150455e-05,
+ "loss": 0.5482,
+ "step": 2012
+ },
+ {
+ "epoch": 2.0135663155262415,
+ "grad_norm": 0.4271782636642456,
+ "learning_rate": 4.864452560613485e-05,
+ "loss": 0.6037,
+ "step": 2013
+ },
+ {
+ "epoch": 2.0145665968553654,
+ "grad_norm": 0.46755480766296387,
+ "learning_rate": 4.855460839114392e-05,
+ "loss": 0.5501,
+ "step": 2014
+ },
+ {
+ "epoch": 2.0155668781844893,
+ "grad_norm": 0.4328460693359375,
+ "learning_rate": 4.846474770533446e-05,
+ "loss": 0.5598,
+ "step": 2015
+ },
+ {
+ "epoch": 2.016567159513613,
+ "grad_norm": 0.45182377099990845,
+ "learning_rate": 4.837494364744711e-05,
+ "loss": 0.6259,
+ "step": 2016
+ },
+ {
+ "epoch": 2.017567440842737,
+ "grad_norm": 0.44600123167037964,
+ "learning_rate": 4.828519631616037e-05,
+ "loss": 0.5913,
+ "step": 2017
+ },
+ {
+ "epoch": 2.018567722171861,
+ "grad_norm": 0.44893786311149597,
+ "learning_rate": 4.8195505810090246e-05,
+ "loss": 0.5569,
+ "step": 2018
+ },
+ {
+ "epoch": 2.019568003500985,
+ "grad_norm": 0.45915499329566956,
+ "learning_rate": 4.810587222779043e-05,
+ "loss": 0.3957,
+ "step": 2019
+ },
+ {
+ "epoch": 2.0205682848301083,
+ "grad_norm": 0.43786874413490295,
+ "learning_rate": 4.801629566775196e-05,
+ "loss": 0.672,
+ "step": 2020
+ },
+ {
+ "epoch": 2.021568566159232,
+ "grad_norm": 0.47288084030151367,
+ "learning_rate": 4.792677622840336e-05,
+ "loss": 0.664,
+ "step": 2021
+ },
+ {
+ "epoch": 2.022568847488356,
+ "grad_norm": 0.4936700165271759,
+ "learning_rate": 4.783731400811022e-05,
+ "loss": 0.6865,
+ "step": 2022
+ },
+ {
+ "epoch": 2.02356912881748,
+ "grad_norm": 0.4503854215145111,
+ "learning_rate": 4.774790910517541e-05,
+ "loss": 0.5091,
+ "step": 2023
+ },
+ {
+ "epoch": 2.024569410146604,
+ "grad_norm": 0.4443006217479706,
+ "learning_rate": 4.7658561617838684e-05,
+ "loss": 0.5552,
+ "step": 2024
+ },
+ {
+ "epoch": 2.0255696914757277,
+ "grad_norm": 0.4622330069541931,
+ "learning_rate": 4.756927164427685e-05,
+ "loss": 0.6221,
+ "step": 2025
+ },
+ {
+ "epoch": 2.026569972804851,
+ "grad_norm": 0.44454696774482727,
+ "learning_rate": 4.748003928260335e-05,
+ "loss": 0.6339,
+ "step": 2026
+ },
+ {
+ "epoch": 2.027570254133975,
+ "grad_norm": 0.5022785067558289,
+ "learning_rate": 4.73908646308685e-05,
+ "loss": 0.6527,
+ "step": 2027
+ },
+ {
+ "epoch": 2.028570535463099,
+ "grad_norm": 0.4373007118701935,
+ "learning_rate": 4.730174778705908e-05,
+ "loss": 0.6287,
+ "step": 2028
+ },
+ {
+ "epoch": 2.029570816792223,
+ "grad_norm": 0.40241381525993347,
+ "learning_rate": 4.721268884909833e-05,
+ "loss": 0.5972,
+ "step": 2029
+ },
+ {
+ "epoch": 2.0305710981213467,
+ "grad_norm": 0.44283151626586914,
+ "learning_rate": 4.712368791484597e-05,
+ "loss": 0.4885,
+ "step": 2030
+ },
+ {
+ "epoch": 2.0315713794504706,
+ "grad_norm": 0.47259289026260376,
+ "learning_rate": 4.703474508209793e-05,
+ "loss": 0.6326,
+ "step": 2031
+ },
+ {
+ "epoch": 2.032571660779594,
+ "grad_norm": 0.4327373802661896,
+ "learning_rate": 4.694586044858633e-05,
+ "loss": 0.5445,
+ "step": 2032
+ },
+ {
+ "epoch": 2.033571942108718,
+ "grad_norm": 0.42448604106903076,
+ "learning_rate": 4.6857034111979235e-05,
+ "loss": 0.5654,
+ "step": 2033
+ },
+ {
+ "epoch": 2.034572223437842,
+ "grad_norm": 0.450679212808609,
+ "learning_rate": 4.6768266169880804e-05,
+ "loss": 0.6663,
+ "step": 2034
+ },
+ {
+ "epoch": 2.0355725047669657,
+ "grad_norm": 0.4411293864250183,
+ "learning_rate": 4.66795567198309e-05,
+ "loss": 0.6251,
+ "step": 2035
+ },
+ {
+ "epoch": 2.0365727860960896,
+ "grad_norm": 0.4397091269493103,
+ "learning_rate": 4.6590905859305135e-05,
+ "loss": 0.5921,
+ "step": 2036
+ },
+ {
+ "epoch": 2.0375730674252135,
+ "grad_norm": 0.4734553396701813,
+ "learning_rate": 4.650231368571486e-05,
+ "loss": 0.6071,
+ "step": 2037
+ },
+ {
+ "epoch": 2.038573348754337,
+ "grad_norm": 0.4945426285266876,
+ "learning_rate": 4.6413780296406764e-05,
+ "loss": 0.6295,
+ "step": 2038
+ },
+ {
+ "epoch": 2.039573630083461,
+ "grad_norm": 0.47174322605133057,
+ "learning_rate": 4.6325305788663096e-05,
+ "loss": 0.7296,
+ "step": 2039
+ },
+ {
+ "epoch": 2.0405739114125847,
+ "grad_norm": 0.4360683262348175,
+ "learning_rate": 4.623689025970128e-05,
+ "loss": 0.5333,
+ "step": 2040
+ },
+ {
+ "epoch": 2.0415741927417086,
+ "grad_norm": 0.481152206659317,
+ "learning_rate": 4.6148533806674074e-05,
+ "loss": 0.6103,
+ "step": 2041
+ },
+ {
+ "epoch": 2.0425744740708325,
+ "grad_norm": 0.553936243057251,
+ "learning_rate": 4.606023652666915e-05,
+ "loss": 0.6623,
+ "step": 2042
+ },
+ {
+ "epoch": 2.0435747553999564,
+ "grad_norm": 0.4619155526161194,
+ "learning_rate": 4.597199851670932e-05,
+ "loss": 0.7526,
+ "step": 2043
+ },
+ {
+ "epoch": 2.0445750367290803,
+ "grad_norm": 0.4538067877292633,
+ "learning_rate": 4.5883819873752156e-05,
+ "loss": 0.6401,
+ "step": 2044
+ },
+ {
+ "epoch": 2.0455753180582037,
+ "grad_norm": 0.46386808156967163,
+ "learning_rate": 4.5795700694690046e-05,
+ "loss": 0.7433,
+ "step": 2045
+ },
+ {
+ "epoch": 2.0465755993873276,
+ "grad_norm": 0.490567147731781,
+ "learning_rate": 4.5707641076350074e-05,
+ "loss": 0.6046,
+ "step": 2046
+ },
+ {
+ "epoch": 2.0475758807164515,
+ "grad_norm": 0.45558422803878784,
+ "learning_rate": 4.5619641115493774e-05,
+ "loss": 0.6133,
+ "step": 2047
+ },
+ {
+ "epoch": 2.0485761620455754,
+ "grad_norm": 0.49613502621650696,
+ "learning_rate": 4.553170090881724e-05,
+ "loss": 0.7413,
+ "step": 2048
+ },
+ {
+ "epoch": 2.0495764433746992,
+ "grad_norm": 0.6731177568435669,
+ "learning_rate": 4.54438205529508e-05,
+ "loss": 0.5993,
+ "step": 2049
+ },
+ {
+ "epoch": 2.050576724703823,
+ "grad_norm": 0.4620528817176819,
+ "learning_rate": 4.535600014445914e-05,
+ "loss": 0.6223,
+ "step": 2050
+ },
+ {
+ "epoch": 2.0515770060329466,
+ "grad_norm": 0.47041627764701843,
+ "learning_rate": 4.5268239779840935e-05,
+ "loss": 0.6265,
+ "step": 2051
+ },
+ {
+ "epoch": 2.0525772873620705,
+ "grad_norm": 0.4700336754322052,
+ "learning_rate": 4.518053955552903e-05,
+ "loss": 0.7044,
+ "step": 2052
+ },
+ {
+ "epoch": 2.0535775686911943,
+ "grad_norm": 0.4150082468986511,
+ "learning_rate": 4.5092899567890035e-05,
+ "loss": 0.5772,
+ "step": 2053
+ },
+ {
+ "epoch": 2.0545778500203182,
+ "grad_norm": 0.4649240970611572,
+ "learning_rate": 4.5005319913224506e-05,
+ "loss": 0.5031,
+ "step": 2054
+ },
+ {
+ "epoch": 2.055578131349442,
+ "grad_norm": 0.5402297973632812,
+ "learning_rate": 4.491780068776663e-05,
+ "loss": 0.6701,
+ "step": 2055
+ },
+ {
+ "epoch": 2.056578412678566,
+ "grad_norm": 0.49985572695732117,
+ "learning_rate": 4.4830341987684166e-05,
+ "loss": 0.5614,
+ "step": 2056
+ },
+ {
+ "epoch": 2.0575786940076894,
+ "grad_norm": 0.49652746319770813,
+ "learning_rate": 4.474294390907847e-05,
+ "loss": 0.593,
+ "step": 2057
+ },
+ {
+ "epoch": 2.0585789753368133,
+ "grad_norm": 0.4196471571922302,
+ "learning_rate": 4.465560654798417e-05,
+ "loss": 0.6161,
+ "step": 2058
+ },
+ {
+ "epoch": 2.059579256665937,
+ "grad_norm": 0.47680115699768066,
+ "learning_rate": 4.4568330000369286e-05,
+ "loss": 0.5485,
+ "step": 2059
+ },
+ {
+ "epoch": 2.060579537995061,
+ "grad_norm": 0.4866887032985687,
+ "learning_rate": 4.448111436213486e-05,
+ "loss": 0.6281,
+ "step": 2060
+ },
+ {
+ "epoch": 2.061579819324185,
+ "grad_norm": 0.5039479732513428,
+ "learning_rate": 4.4393959729115244e-05,
+ "loss": 0.6517,
+ "step": 2061
+ },
+ {
+ "epoch": 2.062580100653309,
+ "grad_norm": 0.4648885428905487,
+ "learning_rate": 4.4306866197077544e-05,
+ "loss": 0.58,
+ "step": 2062
+ },
+ {
+ "epoch": 2.0635803819824328,
+ "grad_norm": 0.5204921960830688,
+ "learning_rate": 4.421983386172178e-05,
+ "loss": 0.6859,
+ "step": 2063
+ },
+ {
+ "epoch": 2.064580663311556,
+ "grad_norm": 0.4675167202949524,
+ "learning_rate": 4.413286281868081e-05,
+ "loss": 0.6169,
+ "step": 2064
+ },
+ {
+ "epoch": 2.06558094464068,
+ "grad_norm": 0.5305991172790527,
+ "learning_rate": 4.404595316352002e-05,
+ "loss": 0.691,
+ "step": 2065
+ },
+ {
+ "epoch": 2.066581225969804,
+ "grad_norm": 0.4630433917045593,
+ "learning_rate": 4.3959104991737455e-05,
+ "loss": 0.6128,
+ "step": 2066
+ },
+ {
+ "epoch": 2.067581507298928,
+ "grad_norm": 0.44131141901016235,
+ "learning_rate": 4.387231839876349e-05,
+ "loss": 0.6266,
+ "step": 2067
+ },
+ {
+ "epoch": 2.0685817886280518,
+ "grad_norm": 0.5094907879829407,
+ "learning_rate": 4.3785593479960964e-05,
+ "loss": 0.7263,
+ "step": 2068
+ },
+ {
+ "epoch": 2.0695820699571756,
+ "grad_norm": 0.42740294337272644,
+ "learning_rate": 4.369893033062481e-05,
+ "loss": 0.543,
+ "step": 2069
+ },
+ {
+ "epoch": 2.070582351286299,
+ "grad_norm": 0.5060046911239624,
+ "learning_rate": 4.3612329045982236e-05,
+ "loss": 0.6893,
+ "step": 2070
+ },
+ {
+ "epoch": 2.071582632615423,
+ "grad_norm": 0.42943909764289856,
+ "learning_rate": 4.35257897211923e-05,
+ "loss": 0.6992,
+ "step": 2071
+ },
+ {
+ "epoch": 2.072582913944547,
+ "grad_norm": 0.48537638783454895,
+ "learning_rate": 4.343931245134616e-05,
+ "loss": 0.6904,
+ "step": 2072
+ },
+ {
+ "epoch": 2.0735831952736707,
+ "grad_norm": 0.4671311676502228,
+ "learning_rate": 4.335289733146665e-05,
+ "loss": 0.6142,
+ "step": 2073
+ },
+ {
+ "epoch": 2.0745834766027946,
+ "grad_norm": 0.4160546362400055,
+ "learning_rate": 4.326654445650833e-05,
+ "loss": 0.6052,
+ "step": 2074
+ },
+ {
+ "epoch": 2.0755837579319185,
+ "grad_norm": 0.4677714705467224,
+ "learning_rate": 4.3180253921357414e-05,
+ "loss": 0.6142,
+ "step": 2075
+ },
+ {
+ "epoch": 2.076584039261042,
+ "grad_norm": 0.4365472197532654,
+ "learning_rate": 4.309402582083161e-05,
+ "loss": 0.6131,
+ "step": 2076
+ },
+ {
+ "epoch": 2.077584320590166,
+ "grad_norm": 0.4473261833190918,
+ "learning_rate": 4.300786024968003e-05,
+ "loss": 0.5813,
+ "step": 2077
+ },
+ {
+ "epoch": 2.0785846019192897,
+ "grad_norm": 0.5056237578392029,
+ "learning_rate": 4.2921757302583e-05,
+ "loss": 0.5913,
+ "step": 2078
+ },
+ {
+ "epoch": 2.0795848832484136,
+ "grad_norm": 0.5617183446884155,
+ "learning_rate": 4.283571707415214e-05,
+ "loss": 0.7603,
+ "step": 2079
+ },
+ {
+ "epoch": 2.0805851645775375,
+ "grad_norm": 0.48133864998817444,
+ "learning_rate": 4.274973965893003e-05,
+ "loss": 0.5045,
+ "step": 2080
+ },
+ {
+ "epoch": 2.0815854459066614,
+ "grad_norm": 0.47303125262260437,
+ "learning_rate": 4.266382515139039e-05,
+ "loss": 0.5701,
+ "step": 2081
+ },
+ {
+ "epoch": 2.0825857272357853,
+ "grad_norm": 0.5299637317657471,
+ "learning_rate": 4.2577973645937674e-05,
+ "loss": 0.7264,
+ "step": 2082
+ },
+ {
+ "epoch": 2.0835860085649087,
+ "grad_norm": 0.5403549075126648,
+ "learning_rate": 4.2492185236907125e-05,
+ "loss": 0.6469,
+ "step": 2083
+ },
+ {
+ "epoch": 2.0845862898940326,
+ "grad_norm": 0.4790133237838745,
+ "learning_rate": 4.2406460018564765e-05,
+ "loss": 0.6682,
+ "step": 2084
+ },
+ {
+ "epoch": 2.0855865712231565,
+ "grad_norm": 0.47176721692085266,
+ "learning_rate": 4.2320798085107036e-05,
+ "loss": 0.6211,
+ "step": 2085
+ },
+ {
+ "epoch": 2.0865868525522804,
+ "grad_norm": 0.4947776794433594,
+ "learning_rate": 4.223519953066099e-05,
+ "loss": 0.59,
+ "step": 2086
+ },
+ {
+ "epoch": 2.0875871338814043,
+ "grad_norm": 0.5150135159492493,
+ "learning_rate": 4.214966444928387e-05,
+ "loss": 0.679,
+ "step": 2087
+ },
+ {
+ "epoch": 2.088587415210528,
+ "grad_norm": 0.5139247179031372,
+ "learning_rate": 4.206419293496333e-05,
+ "loss": 0.5977,
+ "step": 2088
+ },
+ {
+ "epoch": 2.0895876965396516,
+ "grad_norm": 0.48402106761932373,
+ "learning_rate": 4.1978785081617057e-05,
+ "loss": 0.7577,
+ "step": 2089
+ },
+ {
+ "epoch": 2.0905879778687755,
+ "grad_norm": 0.44100990891456604,
+ "learning_rate": 4.1893440983092856e-05,
+ "loss": 0.6396,
+ "step": 2090
+ },
+ {
+ "epoch": 2.0915882591978994,
+ "grad_norm": 0.4564374089241028,
+ "learning_rate": 4.18081607331685e-05,
+ "loss": 0.5049,
+ "step": 2091
+ },
+ {
+ "epoch": 2.0925885405270233,
+ "grad_norm": 0.49232858419418335,
+ "learning_rate": 4.172294442555148e-05,
+ "loss": 0.6589,
+ "step": 2092
+ },
+ {
+ "epoch": 2.093588821856147,
+ "grad_norm": 0.4091750383377075,
+ "learning_rate": 4.1637792153879196e-05,
+ "loss": 0.5527,
+ "step": 2093
+ },
+ {
+ "epoch": 2.094589103185271,
+ "grad_norm": 0.4389550983905792,
+ "learning_rate": 4.15527040117185e-05,
+ "loss": 0.6091,
+ "step": 2094
+ },
+ {
+ "epoch": 2.0955893845143945,
+ "grad_norm": 0.4765204191207886,
+ "learning_rate": 4.146768009256595e-05,
+ "loss": 0.7185,
+ "step": 2095
+ },
+ {
+ "epoch": 2.0965896658435184,
+ "grad_norm": 0.5131024718284607,
+ "learning_rate": 4.13827204898474e-05,
+ "loss": 0.6502,
+ "step": 2096
+ },
+ {
+ "epoch": 2.0975899471726422,
+ "grad_norm": 0.5671885013580322,
+ "learning_rate": 4.129782529691815e-05,
+ "loss": 0.577,
+ "step": 2097
+ },
+ {
+ "epoch": 2.098590228501766,
+ "grad_norm": 0.4500812590122223,
+ "learning_rate": 4.1212994607062594e-05,
+ "loss": 0.6345,
+ "step": 2098
+ },
+ {
+ "epoch": 2.09959050983089,
+ "grad_norm": 0.4754406213760376,
+ "learning_rate": 4.1128228513494385e-05,
+ "loss": 0.6497,
+ "step": 2099
+ },
+ {
+ "epoch": 2.100590791160014,
+ "grad_norm": 0.4294159412384033,
+ "learning_rate": 4.1043527109356095e-05,
+ "loss": 0.6813,
+ "step": 2100
+ },
+ {
+ "epoch": 2.1015910724891373,
+ "grad_norm": 0.7561903595924377,
+ "learning_rate": 4.095889048771922e-05,
+ "loss": 0.8037,
+ "step": 2101
+ },
+ {
+ "epoch": 2.1025913538182612,
+ "grad_norm": 0.48615512251853943,
+ "learning_rate": 4.087431874158416e-05,
+ "loss": 0.6563,
+ "step": 2102
+ },
+ {
+ "epoch": 2.103591635147385,
+ "grad_norm": 0.46207091212272644,
+ "learning_rate": 4.0789811963879906e-05,
+ "loss": 0.6163,
+ "step": 2103
+ },
+ {
+ "epoch": 2.104591916476509,
+ "grad_norm": 0.49406757950782776,
+ "learning_rate": 4.070537024746416e-05,
+ "loss": 0.5831,
+ "step": 2104
+ },
+ {
+ "epoch": 2.105592197805633,
+ "grad_norm": 0.5127863883972168,
+ "learning_rate": 4.06209936851231e-05,
+ "loss": 0.5506,
+ "step": 2105
+ },
+ {
+ "epoch": 2.106592479134757,
+ "grad_norm": 0.41014209389686584,
+ "learning_rate": 4.053668236957134e-05,
+ "loss": 0.5692,
+ "step": 2106
+ },
+ {
+ "epoch": 2.1075927604638807,
+ "grad_norm": 0.5290461182594299,
+ "learning_rate": 4.0452436393451735e-05,
+ "loss": 0.571,
+ "step": 2107
+ },
+ {
+ "epoch": 2.108593041793004,
+ "grad_norm": 0.45752203464508057,
+ "learning_rate": 4.036825584933533e-05,
+ "loss": 0.709,
+ "step": 2108
+ },
+ {
+ "epoch": 2.109593323122128,
+ "grad_norm": 0.5168712139129639,
+ "learning_rate": 4.028414082972141e-05,
+ "loss": 0.7713,
+ "step": 2109
+ },
+ {
+ "epoch": 2.110593604451252,
+ "grad_norm": 0.6233658194541931,
+ "learning_rate": 4.020009142703708e-05,
+ "loss": 0.5952,
+ "step": 2110
+ },
+ {
+ "epoch": 2.1115938857803758,
+ "grad_norm": 0.5407616496086121,
+ "learning_rate": 4.011610773363751e-05,
+ "loss": 0.7355,
+ "step": 2111
+ },
+ {
+ "epoch": 2.1125941671094997,
+ "grad_norm": 0.6055451035499573,
+ "learning_rate": 4.003218984180552e-05,
+ "loss": 0.6509,
+ "step": 2112
+ },
+ {
+ "epoch": 2.1135944484386235,
+ "grad_norm": 0.49832651019096375,
+ "learning_rate": 3.994833784375177e-05,
+ "loss": 0.6325,
+ "step": 2113
+ },
+ {
+ "epoch": 2.114594729767747,
+ "grad_norm": 0.5014695525169373,
+ "learning_rate": 3.986455183161437e-05,
+ "loss": 0.6134,
+ "step": 2114
+ },
+ {
+ "epoch": 2.115595011096871,
+ "grad_norm": 0.45379167795181274,
+ "learning_rate": 3.978083189745907e-05,
+ "loss": 0.5517,
+ "step": 2115
+ },
+ {
+ "epoch": 2.1165952924259948,
+ "grad_norm": 0.5124073028564453,
+ "learning_rate": 3.9697178133278855e-05,
+ "loss": 0.7396,
+ "step": 2116
+ },
+ {
+ "epoch": 2.1175955737551186,
+ "grad_norm": 0.4667278230190277,
+ "learning_rate": 3.961359063099416e-05,
+ "loss": 0.578,
+ "step": 2117
+ },
+ {
+ "epoch": 2.1185958550842425,
+ "grad_norm": 0.5495364665985107,
+ "learning_rate": 3.953006948245247e-05,
+ "loss": 0.4708,
+ "step": 2118
+ },
+ {
+ "epoch": 2.1195961364133664,
+ "grad_norm": 0.45876625180244446,
+ "learning_rate": 3.944661477942844e-05,
+ "loss": 0.5283,
+ "step": 2119
+ },
+ {
+ "epoch": 2.1205964177424903,
+ "grad_norm": 0.47809210419654846,
+ "learning_rate": 3.9363226613623736e-05,
+ "loss": 0.4733,
+ "step": 2120
+ },
+ {
+ "epoch": 2.1215966990716137,
+ "grad_norm": 0.47257041931152344,
+ "learning_rate": 3.9279905076666826e-05,
+ "loss": 0.6244,
+ "step": 2121
+ },
+ {
+ "epoch": 2.1225969804007376,
+ "grad_norm": 0.5050140023231506,
+ "learning_rate": 3.9196650260113044e-05,
+ "loss": 0.6397,
+ "step": 2122
+ },
+ {
+ "epoch": 2.1235972617298615,
+ "grad_norm": 0.5299871563911438,
+ "learning_rate": 3.9113462255444334e-05,
+ "loss": 0.6117,
+ "step": 2123
+ },
+ {
+ "epoch": 2.1245975430589854,
+ "grad_norm": 0.4946582019329071,
+ "learning_rate": 3.903034115406931e-05,
+ "loss": 0.5121,
+ "step": 2124
+ },
+ {
+ "epoch": 2.1255978243881093,
+ "grad_norm": 0.4589192867279053,
+ "learning_rate": 3.8947287047323e-05,
+ "loss": 0.4481,
+ "step": 2125
+ },
+ {
+ "epoch": 2.126598105717233,
+ "grad_norm": 0.5035550594329834,
+ "learning_rate": 3.886430002646688e-05,
+ "loss": 0.684,
+ "step": 2126
+ },
+ {
+ "epoch": 2.1275983870463566,
+ "grad_norm": 0.5557273030281067,
+ "learning_rate": 3.878138018268866e-05,
+ "loss": 0.6545,
+ "step": 2127
+ },
+ {
+ "epoch": 2.1285986683754805,
+ "grad_norm": 0.4621843099594116,
+ "learning_rate": 3.869852760710222e-05,
+ "loss": 0.8157,
+ "step": 2128
+ },
+ {
+ "epoch": 2.1295989497046044,
+ "grad_norm": 0.4417930245399475,
+ "learning_rate": 3.861574239074762e-05,
+ "loss": 0.5235,
+ "step": 2129
+ },
+ {
+ "epoch": 2.1305992310337283,
+ "grad_norm": 0.556983470916748,
+ "learning_rate": 3.8533024624590776e-05,
+ "loss": 0.7682,
+ "step": 2130
+ },
+ {
+ "epoch": 2.131599512362852,
+ "grad_norm": 0.5013543963432312,
+ "learning_rate": 3.845037439952362e-05,
+ "loss": 0.6058,
+ "step": 2131
+ },
+ {
+ "epoch": 2.132599793691976,
+ "grad_norm": 0.43936899304389954,
+ "learning_rate": 3.836779180636373e-05,
+ "loss": 0.5379,
+ "step": 2132
+ },
+ {
+ "epoch": 2.1336000750210995,
+ "grad_norm": 0.4661477208137512,
+ "learning_rate": 3.828527693585451e-05,
+ "loss": 0.5905,
+ "step": 2133
+ },
+ {
+ "epoch": 2.1346003563502234,
+ "grad_norm": 0.6327193379402161,
+ "learning_rate": 3.8202829878664816e-05,
+ "loss": 0.5805,
+ "step": 2134
+ },
+ {
+ "epoch": 2.1356006376793473,
+ "grad_norm": 0.4922885298728943,
+ "learning_rate": 3.812045072538909e-05,
+ "loss": 0.6583,
+ "step": 2135
+ },
+ {
+ "epoch": 2.136600919008471,
+ "grad_norm": 0.44325774908065796,
+ "learning_rate": 3.8038139566547146e-05,
+ "loss": 0.5686,
+ "step": 2136
+ },
+ {
+ "epoch": 2.137601200337595,
+ "grad_norm": 0.5307816863059998,
+ "learning_rate": 3.7955896492584e-05,
+ "loss": 0.6264,
+ "step": 2137
+ },
+ {
+ "epoch": 2.138601481666719,
+ "grad_norm": 0.4622756242752075,
+ "learning_rate": 3.787372159386999e-05,
+ "loss": 0.6792,
+ "step": 2138
+ },
+ {
+ "epoch": 2.1396017629958424,
+ "grad_norm": 0.5342557430267334,
+ "learning_rate": 3.7791614960700395e-05,
+ "loss": 0.5755,
+ "step": 2139
+ },
+ {
+ "epoch": 2.1406020443249663,
+ "grad_norm": 0.5566471815109253,
+ "learning_rate": 3.770957668329562e-05,
+ "loss": 0.7433,
+ "step": 2140
+ },
+ {
+ "epoch": 2.14160232565409,
+ "grad_norm": 0.47061699628829956,
+ "learning_rate": 3.7627606851800837e-05,
+ "loss": 0.4779,
+ "step": 2141
+ },
+ {
+ "epoch": 2.142602606983214,
+ "grad_norm": 0.42781785130500793,
+ "learning_rate": 3.7545705556286126e-05,
+ "loss": 0.6522,
+ "step": 2142
+ },
+ {
+ "epoch": 2.143602888312338,
+ "grad_norm": 0.5037875771522522,
+ "learning_rate": 3.746387288674613e-05,
+ "loss": 0.62,
+ "step": 2143
+ },
+ {
+ "epoch": 2.144603169641462,
+ "grad_norm": 0.5067894458770752,
+ "learning_rate": 3.7382108933100234e-05,
+ "loss": 0.7461,
+ "step": 2144
+ },
+ {
+ "epoch": 2.1456034509705857,
+ "grad_norm": 0.5479350090026855,
+ "learning_rate": 3.730041378519216e-05,
+ "loss": 0.7418,
+ "step": 2145
+ },
+ {
+ "epoch": 2.146603732299709,
+ "grad_norm": 0.4507127106189728,
+ "learning_rate": 3.721878753279017e-05,
+ "loss": 0.6838,
+ "step": 2146
+ },
+ {
+ "epoch": 2.147604013628833,
+ "grad_norm": 0.9193136096000671,
+ "learning_rate": 3.713723026558671e-05,
+ "loss": 0.5877,
+ "step": 2147
+ },
+ {
+ "epoch": 2.148604294957957,
+ "grad_norm": 0.43999728560447693,
+ "learning_rate": 3.705574207319844e-05,
+ "loss": 0.6485,
+ "step": 2148
+ },
+ {
+ "epoch": 2.149604576287081,
+ "grad_norm": 0.5130500197410583,
+ "learning_rate": 3.697432304516618e-05,
+ "loss": 0.7039,
+ "step": 2149
+ },
+ {
+ "epoch": 2.1506048576162047,
+ "grad_norm": 0.5071646571159363,
+ "learning_rate": 3.689297327095472e-05,
+ "loss": 0.602,
+ "step": 2150
+ },
+ {
+ "epoch": 2.1516051389453286,
+ "grad_norm": 0.47906339168548584,
+ "learning_rate": 3.681169283995279e-05,
+ "loss": 0.7002,
+ "step": 2151
+ },
+ {
+ "epoch": 2.152605420274452,
+ "grad_norm": 0.46951034665107727,
+ "learning_rate": 3.673048184147281e-05,
+ "loss": 0.6469,
+ "step": 2152
+ },
+ {
+ "epoch": 2.153605701603576,
+ "grad_norm": 0.4949340522289276,
+ "learning_rate": 3.664934036475104e-05,
+ "loss": 0.6304,
+ "step": 2153
+ },
+ {
+ "epoch": 2.1546059829327,
+ "grad_norm": 0.44482266902923584,
+ "learning_rate": 3.656826849894726e-05,
+ "loss": 0.4853,
+ "step": 2154
+ },
+ {
+ "epoch": 2.1556062642618237,
+ "grad_norm": 0.5063248872756958,
+ "learning_rate": 3.648726633314475e-05,
+ "loss": 0.6082,
+ "step": 2155
+ },
+ {
+ "epoch": 2.1566065455909476,
+ "grad_norm": 0.5235609412193298,
+ "learning_rate": 3.640633395635032e-05,
+ "loss": 0.6015,
+ "step": 2156
+ },
+ {
+ "epoch": 2.1576068269200714,
+ "grad_norm": 0.4473001956939697,
+ "learning_rate": 3.632547145749395e-05,
+ "loss": 0.5672,
+ "step": 2157
+ },
+ {
+ "epoch": 2.158607108249195,
+ "grad_norm": 0.4669405519962311,
+ "learning_rate": 3.624467892542895e-05,
+ "loss": 0.5406,
+ "step": 2158
+ },
+ {
+ "epoch": 2.1596073895783188,
+ "grad_norm": 0.4765620827674866,
+ "learning_rate": 3.616395644893166e-05,
+ "loss": 0.6277,
+ "step": 2159
+ },
+ {
+ "epoch": 2.1606076709074427,
+ "grad_norm": 0.47696128487586975,
+ "learning_rate": 3.6083304116701535e-05,
+ "loss": 0.5853,
+ "step": 2160
+ },
+ {
+ "epoch": 2.1616079522365665,
+ "grad_norm": 0.504258394241333,
+ "learning_rate": 3.600272201736082e-05,
+ "loss": 0.6468,
+ "step": 2161
+ },
+ {
+ "epoch": 2.1626082335656904,
+ "grad_norm": 0.5608981251716614,
+ "learning_rate": 3.5922210239454764e-05,
+ "loss": 0.6373,
+ "step": 2162
+ },
+ {
+ "epoch": 2.1636085148948143,
+ "grad_norm": 0.46563276648521423,
+ "learning_rate": 3.5841768871451185e-05,
+ "loss": 0.6602,
+ "step": 2163
+ },
+ {
+ "epoch": 2.1646087962239378,
+ "grad_norm": 0.44680067896842957,
+ "learning_rate": 3.57613980017406e-05,
+ "loss": 0.6879,
+ "step": 2164
+ },
+ {
+ "epoch": 2.1656090775530616,
+ "grad_norm": 0.5114299058914185,
+ "learning_rate": 3.568109771863613e-05,
+ "loss": 0.6655,
+ "step": 2165
+ },
+ {
+ "epoch": 2.1666093588821855,
+ "grad_norm": 0.4544784426689148,
+ "learning_rate": 3.560086811037316e-05,
+ "loss": 0.6687,
+ "step": 2166
+ },
+ {
+ "epoch": 2.1676096402113094,
+ "grad_norm": 0.4559856057167053,
+ "learning_rate": 3.552070926510962e-05,
+ "loss": 0.5433,
+ "step": 2167
+ },
+ {
+ "epoch": 2.1686099215404333,
+ "grad_norm": 0.506377100944519,
+ "learning_rate": 3.54406212709255e-05,
+ "loss": 0.7024,
+ "step": 2168
+ },
+ {
+ "epoch": 2.169610202869557,
+ "grad_norm": 0.5076850056648254,
+ "learning_rate": 3.536060421582309e-05,
+ "loss": 0.6704,
+ "step": 2169
+ },
+ {
+ "epoch": 2.170610484198681,
+ "grad_norm": 0.4937109351158142,
+ "learning_rate": 3.52806581877266e-05,
+ "loss": 0.6859,
+ "step": 2170
+ },
+ {
+ "epoch": 2.1716107655278045,
+ "grad_norm": 0.49975091218948364,
+ "learning_rate": 3.520078327448232e-05,
+ "loss": 0.5282,
+ "step": 2171
+ },
+ {
+ "epoch": 2.1726110468569284,
+ "grad_norm": 0.5231044888496399,
+ "learning_rate": 3.5120979563858266e-05,
+ "loss": 0.5605,
+ "step": 2172
+ },
+ {
+ "epoch": 2.1736113281860523,
+ "grad_norm": 0.46311333775520325,
+ "learning_rate": 3.5041247143544364e-05,
+ "loss": 0.6421,
+ "step": 2173
+ },
+ {
+ "epoch": 2.174611609515176,
+ "grad_norm": 0.5018386840820312,
+ "learning_rate": 3.496158610115207e-05,
+ "loss": 0.5633,
+ "step": 2174
+ },
+ {
+ "epoch": 2.1756118908443,
+ "grad_norm": 0.42992815375328064,
+ "learning_rate": 3.4881996524214445e-05,
+ "loss": 0.5782,
+ "step": 2175
+ },
+ {
+ "epoch": 2.176612172173424,
+ "grad_norm": 0.4959898889064789,
+ "learning_rate": 3.48024785001861e-05,
+ "loss": 0.6792,
+ "step": 2176
+ },
+ {
+ "epoch": 2.1776124535025474,
+ "grad_norm": 0.5085489749908447,
+ "learning_rate": 3.472303211644289e-05,
+ "loss": 0.6612,
+ "step": 2177
+ },
+ {
+ "epoch": 2.1786127348316713,
+ "grad_norm": 0.4328081011772156,
+ "learning_rate": 3.464365746028208e-05,
+ "loss": 0.6251,
+ "step": 2178
+ },
+ {
+ "epoch": 2.179613016160795,
+ "grad_norm": 0.4798353314399719,
+ "learning_rate": 3.456435461892203e-05,
+ "loss": 0.5382,
+ "step": 2179
+ },
+ {
+ "epoch": 2.180613297489919,
+ "grad_norm": 0.4488179087638855,
+ "learning_rate": 3.4485123679502274e-05,
+ "loss": 0.6123,
+ "step": 2180
+ },
+ {
+ "epoch": 2.181613578819043,
+ "grad_norm": 0.44371160864830017,
+ "learning_rate": 3.4405964729083254e-05,
+ "loss": 0.6717,
+ "step": 2181
+ },
+ {
+ "epoch": 2.182613860148167,
+ "grad_norm": 0.43803316354751587,
+ "learning_rate": 3.43268778546463e-05,
+ "loss": 0.5674,
+ "step": 2182
+ },
+ {
+ "epoch": 2.1836141414772907,
+ "grad_norm": 0.44481751322746277,
+ "learning_rate": 3.424786314309365e-05,
+ "loss": 0.5787,
+ "step": 2183
+ },
+ {
+ "epoch": 2.184614422806414,
+ "grad_norm": 0.5348169803619385,
+ "learning_rate": 3.416892068124812e-05,
+ "loss": 0.7258,
+ "step": 2184
+ },
+ {
+ "epoch": 2.185614704135538,
+ "grad_norm": 0.4896971583366394,
+ "learning_rate": 3.409005055585327e-05,
+ "loss": 0.5921,
+ "step": 2185
+ },
+ {
+ "epoch": 2.186614985464662,
+ "grad_norm": 0.5136271119117737,
+ "learning_rate": 3.401125285357302e-05,
+ "loss": 0.5936,
+ "step": 2186
+ },
+ {
+ "epoch": 2.187615266793786,
+ "grad_norm": 0.45636460185050964,
+ "learning_rate": 3.393252766099187e-05,
+ "loss": 0.6523,
+ "step": 2187
+ },
+ {
+ "epoch": 2.1886155481229097,
+ "grad_norm": 0.7612220644950867,
+ "learning_rate": 3.3853875064614515e-05,
+ "loss": 0.6971,
+ "step": 2188
+ },
+ {
+ "epoch": 2.1896158294520336,
+ "grad_norm": 0.5007143616676331,
+ "learning_rate": 3.377529515086598e-05,
+ "loss": 0.5335,
+ "step": 2189
+ },
+ {
+ "epoch": 2.190616110781157,
+ "grad_norm": 0.47488054633140564,
+ "learning_rate": 3.369678800609134e-05,
+ "loss": 0.6134,
+ "step": 2190
+ },
+ {
+ "epoch": 2.191616392110281,
+ "grad_norm": 0.4808323383331299,
+ "learning_rate": 3.361835371655578e-05,
+ "loss": 0.6084,
+ "step": 2191
+ },
+ {
+ "epoch": 2.192616673439405,
+ "grad_norm": 0.4287136495113373,
+ "learning_rate": 3.353999236844436e-05,
+ "loss": 0.5938,
+ "step": 2192
+ },
+ {
+ "epoch": 2.1936169547685287,
+ "grad_norm": 0.48613372445106506,
+ "learning_rate": 3.3461704047862054e-05,
+ "loss": 0.624,
+ "step": 2193
+ },
+ {
+ "epoch": 2.1946172360976526,
+ "grad_norm": 0.5133928060531616,
+ "learning_rate": 3.33834888408336e-05,
+ "loss": 0.5899,
+ "step": 2194
+ },
+ {
+ "epoch": 2.1956175174267765,
+ "grad_norm": 0.5271064043045044,
+ "learning_rate": 3.3305346833303296e-05,
+ "loss": 0.7295,
+ "step": 2195
+ },
+ {
+ "epoch": 2.1966177987559,
+ "grad_norm": 0.5942690968513489,
+ "learning_rate": 3.322727811113516e-05,
+ "loss": 0.7228,
+ "step": 2196
+ },
+ {
+ "epoch": 2.197618080085024,
+ "grad_norm": 0.47183600068092346,
+ "learning_rate": 3.314928276011251e-05,
+ "loss": 0.6717,
+ "step": 2197
+ },
+ {
+ "epoch": 2.1986183614141477,
+ "grad_norm": 0.4545646011829376,
+ "learning_rate": 3.307136086593821e-05,
+ "loss": 0.5611,
+ "step": 2198
+ },
+ {
+ "epoch": 2.1996186427432716,
+ "grad_norm": 0.4944184422492981,
+ "learning_rate": 3.299351251423426e-05,
+ "loss": 0.551,
+ "step": 2199
+ },
+ {
+ "epoch": 2.2006189240723955,
+ "grad_norm": 0.4972105026245117,
+ "learning_rate": 3.291573779054199e-05,
+ "loss": 0.7719,
+ "step": 2200
+ },
+ {
+ "epoch": 2.2016192054015193,
+ "grad_norm": 0.5225645899772644,
+ "learning_rate": 3.2838036780321715e-05,
+ "loss": 0.7034,
+ "step": 2201
+ },
+ {
+ "epoch": 2.202619486730643,
+ "grad_norm": 0.49897319078445435,
+ "learning_rate": 3.2760409568952766e-05,
+ "loss": 0.6892,
+ "step": 2202
+ },
+ {
+ "epoch": 2.2036197680597667,
+ "grad_norm": 0.4999954402446747,
+ "learning_rate": 3.268285624173347e-05,
+ "loss": 0.6754,
+ "step": 2203
+ },
+ {
+ "epoch": 2.2046200493888906,
+ "grad_norm": 0.4323941469192505,
+ "learning_rate": 3.260537688388086e-05,
+ "loss": 0.5694,
+ "step": 2204
+ },
+ {
+ "epoch": 2.2056203307180144,
+ "grad_norm": 0.5175321102142334,
+ "learning_rate": 3.252797158053077e-05,
+ "loss": 0.6986,
+ "step": 2205
+ },
+ {
+ "epoch": 2.2066206120471383,
+ "grad_norm": 0.5313690900802612,
+ "learning_rate": 3.24506404167376e-05,
+ "loss": 0.6527,
+ "step": 2206
+ },
+ {
+ "epoch": 2.207620893376262,
+ "grad_norm": 0.5002806186676025,
+ "learning_rate": 3.2373383477474354e-05,
+ "loss": 0.5748,
+ "step": 2207
+ },
+ {
+ "epoch": 2.208621174705386,
+ "grad_norm": 0.5108035802841187,
+ "learning_rate": 3.229620084763237e-05,
+ "loss": 0.6255,
+ "step": 2208
+ },
+ {
+ "epoch": 2.2096214560345095,
+ "grad_norm": 0.5970383882522583,
+ "learning_rate": 3.221909261202146e-05,
+ "loss": 0.7238,
+ "step": 2209
+ },
+ {
+ "epoch": 2.2106217373636334,
+ "grad_norm": 0.5481739044189453,
+ "learning_rate": 3.214205885536965e-05,
+ "loss": 0.6904,
+ "step": 2210
+ },
+ {
+ "epoch": 2.2116220186927573,
+ "grad_norm": 0.4750816226005554,
+ "learning_rate": 3.2065099662323017e-05,
+ "loss": 0.5531,
+ "step": 2211
+ },
+ {
+ "epoch": 2.212622300021881,
+ "grad_norm": 0.4694627523422241,
+ "learning_rate": 3.1988215117445896e-05,
+ "loss": 0.503,
+ "step": 2212
+ },
+ {
+ "epoch": 2.213622581351005,
+ "grad_norm": 0.5215654373168945,
+ "learning_rate": 3.191140530522041e-05,
+ "loss": 0.5861,
+ "step": 2213
+ },
+ {
+ "epoch": 2.214622862680129,
+ "grad_norm": 0.49040964245796204,
+ "learning_rate": 3.1834670310046734e-05,
+ "loss": 0.6475,
+ "step": 2214
+ },
+ {
+ "epoch": 2.2156231440092524,
+ "grad_norm": 0.4749949276447296,
+ "learning_rate": 3.1758010216242664e-05,
+ "loss": 0.5104,
+ "step": 2215
+ },
+ {
+ "epoch": 2.2166234253383763,
+ "grad_norm": 0.431478887796402,
+ "learning_rate": 3.168142510804386e-05,
+ "loss": 0.6221,
+ "step": 2216
+ },
+ {
+ "epoch": 2.2176237066675,
+ "grad_norm": 0.5049036741256714,
+ "learning_rate": 3.1604915069603436e-05,
+ "loss": 0.7063,
+ "step": 2217
+ },
+ {
+ "epoch": 2.218623987996624,
+ "grad_norm": 0.5182607173919678,
+ "learning_rate": 3.152848018499215e-05,
+ "loss": 0.5814,
+ "step": 2218
+ },
+ {
+ "epoch": 2.219624269325748,
+ "grad_norm": 0.4258774518966675,
+ "learning_rate": 3.145212053819806e-05,
+ "loss": 0.5629,
+ "step": 2219
+ },
+ {
+ "epoch": 2.220624550654872,
+ "grad_norm": 0.48201316595077515,
+ "learning_rate": 3.137583621312665e-05,
+ "loss": 0.7363,
+ "step": 2220
+ },
+ {
+ "epoch": 2.2216248319839953,
+ "grad_norm": 0.45533907413482666,
+ "learning_rate": 3.1299627293600595e-05,
+ "loss": 0.6155,
+ "step": 2221
+ },
+ {
+ "epoch": 2.222625113313119,
+ "grad_norm": 0.8064365983009338,
+ "learning_rate": 3.122349386335964e-05,
+ "loss": 0.6509,
+ "step": 2222
+ },
+ {
+ "epoch": 2.223625394642243,
+ "grad_norm": 0.4432089924812317,
+ "learning_rate": 3.114743600606078e-05,
+ "loss": 0.5431,
+ "step": 2223
+ },
+ {
+ "epoch": 2.224625675971367,
+ "grad_norm": 0.47190824151039124,
+ "learning_rate": 3.107145380527776e-05,
+ "loss": 0.6119,
+ "step": 2224
+ },
+ {
+ "epoch": 2.225625957300491,
+ "grad_norm": 0.4532092213630676,
+ "learning_rate": 3.099554734450133e-05,
+ "loss": 0.538,
+ "step": 2225
+ },
+ {
+ "epoch": 2.2266262386296147,
+ "grad_norm": 0.4889605641365051,
+ "learning_rate": 3.091971670713889e-05,
+ "loss": 0.6446,
+ "step": 2226
+ },
+ {
+ "epoch": 2.227626519958738,
+ "grad_norm": 0.5159114003181458,
+ "learning_rate": 3.084396197651468e-05,
+ "loss": 0.6148,
+ "step": 2227
+ },
+ {
+ "epoch": 2.228626801287862,
+ "grad_norm": 0.5456231236457825,
+ "learning_rate": 3.076828323586941e-05,
+ "loss": 0.6804,
+ "step": 2228
+ },
+ {
+ "epoch": 2.229627082616986,
+ "grad_norm": 0.5233959555625916,
+ "learning_rate": 3.06926805683603e-05,
+ "loss": 0.5888,
+ "step": 2229
+ },
+ {
+ "epoch": 2.23062736394611,
+ "grad_norm": 0.5444768071174622,
+ "learning_rate": 3.061715405706106e-05,
+ "loss": 0.7607,
+ "step": 2230
+ },
+ {
+ "epoch": 2.2316276452752337,
+ "grad_norm": 0.5582504272460938,
+ "learning_rate": 3.0541703784961615e-05,
+ "loss": 0.613,
+ "step": 2231
+ },
+ {
+ "epoch": 2.2326279266043576,
+ "grad_norm": 0.5025148987770081,
+ "learning_rate": 3.0466329834968233e-05,
+ "loss": 0.6876,
+ "step": 2232
+ },
+ {
+ "epoch": 2.2336282079334815,
+ "grad_norm": 0.5344957709312439,
+ "learning_rate": 3.0391032289903188e-05,
+ "loss": 0.7175,
+ "step": 2233
+ },
+ {
+ "epoch": 2.234628489262605,
+ "grad_norm": 0.4237043559551239,
+ "learning_rate": 3.0315811232504922e-05,
+ "loss": 0.5648,
+ "step": 2234
+ },
+ {
+ "epoch": 2.235628770591729,
+ "grad_norm": 0.4444836378097534,
+ "learning_rate": 3.0240666745427713e-05,
+ "loss": 0.494,
+ "step": 2235
+ },
+ {
+ "epoch": 2.2366290519208527,
+ "grad_norm": 0.46955639123916626,
+ "learning_rate": 3.0165598911241832e-05,
+ "loss": 0.4465,
+ "step": 2236
+ },
+ {
+ "epoch": 2.2376293332499766,
+ "grad_norm": 0.49513357877731323,
+ "learning_rate": 3.009060781243319e-05,
+ "loss": 0.6519,
+ "step": 2237
+ },
+ {
+ "epoch": 2.2386296145791005,
+ "grad_norm": 0.4216475784778595,
+ "learning_rate": 3.0015693531403465e-05,
+ "loss": 0.5114,
+ "step": 2238
+ },
+ {
+ "epoch": 2.2396298959082244,
+ "grad_norm": 0.5368056297302246,
+ "learning_rate": 2.994085615046993e-05,
+ "loss": 0.659,
+ "step": 2239
+ },
+ {
+ "epoch": 2.240630177237348,
+ "grad_norm": 0.4923858642578125,
+ "learning_rate": 2.9866095751865297e-05,
+ "loss": 0.7238,
+ "step": 2240
+ },
+ {
+ "epoch": 2.2416304585664717,
+ "grad_norm": 0.5030984282493591,
+ "learning_rate": 2.979141241773775e-05,
+ "loss": 0.6715,
+ "step": 2241
+ },
+ {
+ "epoch": 2.2426307398955956,
+ "grad_norm": 0.5050022006034851,
+ "learning_rate": 2.971680623015074e-05,
+ "loss": 0.5918,
+ "step": 2242
+ },
+ {
+ "epoch": 2.2436310212247195,
+ "grad_norm": 0.5575593709945679,
+ "learning_rate": 2.9642277271083008e-05,
+ "loss": 0.5425,
+ "step": 2243
+ },
+ {
+ "epoch": 2.2446313025538434,
+ "grad_norm": 0.4873676300048828,
+ "learning_rate": 2.9567825622428358e-05,
+ "loss": 0.6573,
+ "step": 2244
+ },
+ {
+ "epoch": 2.2456315838829672,
+ "grad_norm": 0.4244104325771332,
+ "learning_rate": 2.9493451365995737e-05,
+ "loss": 0.619,
+ "step": 2245
+ },
+ {
+ "epoch": 2.246631865212091,
+ "grad_norm": 0.5072455406188965,
+ "learning_rate": 2.9419154583508978e-05,
+ "loss": 0.6958,
+ "step": 2246
+ },
+ {
+ "epoch": 2.2476321465412146,
+ "grad_norm": 0.4528377950191498,
+ "learning_rate": 2.9344935356606773e-05,
+ "loss": 0.5586,
+ "step": 2247
+ },
+ {
+ "epoch": 2.2486324278703385,
+ "grad_norm": 0.46183013916015625,
+ "learning_rate": 2.9270793766842697e-05,
+ "loss": 0.5195,
+ "step": 2248
+ },
+ {
+ "epoch": 2.2496327091994623,
+ "grad_norm": 0.5115411281585693,
+ "learning_rate": 2.9196729895684884e-05,
+ "loss": 0.6447,
+ "step": 2249
+ },
+ {
+ "epoch": 2.2506329905285862,
+ "grad_norm": 0.44066107273101807,
+ "learning_rate": 2.9122743824516195e-05,
+ "loss": 0.5917,
+ "step": 2250
+ },
+ {
+ "epoch": 2.25163327185771,
+ "grad_norm": 0.4783106744289398,
+ "learning_rate": 2.9048835634633887e-05,
+ "loss": 0.5601,
+ "step": 2251
+ },
+ {
+ "epoch": 2.2526335531868336,
+ "grad_norm": 0.46325576305389404,
+ "learning_rate": 2.897500540724972e-05,
+ "loss": 0.6595,
+ "step": 2252
+ },
+ {
+ "epoch": 2.2536338345159574,
+ "grad_norm": 0.4388025403022766,
+ "learning_rate": 2.8901253223489754e-05,
+ "loss": 0.5466,
+ "step": 2253
+ },
+ {
+ "epoch": 2.2546341158450813,
+ "grad_norm": 0.5207952857017517,
+ "learning_rate": 2.8827579164394347e-05,
+ "loss": 0.7255,
+ "step": 2254
+ },
+ {
+ "epoch": 2.255634397174205,
+ "grad_norm": 0.5066066384315491,
+ "learning_rate": 2.875398331091792e-05,
+ "loss": 0.6495,
+ "step": 2255
+ },
+ {
+ "epoch": 2.256634678503329,
+ "grad_norm": 0.577724277973175,
+ "learning_rate": 2.8680465743928985e-05,
+ "loss": 0.6658,
+ "step": 2256
+ },
+ {
+ "epoch": 2.257634959832453,
+ "grad_norm": 0.4669063985347748,
+ "learning_rate": 2.8607026544210114e-05,
+ "loss": 0.582,
+ "step": 2257
+ },
+ {
+ "epoch": 2.258635241161577,
+ "grad_norm": 0.4934767186641693,
+ "learning_rate": 2.8533665792457644e-05,
+ "loss": 0.7237,
+ "step": 2258
+ },
+ {
+ "epoch": 2.2596355224907003,
+ "grad_norm": 0.44358426332473755,
+ "learning_rate": 2.8460383569281824e-05,
+ "loss": 0.5016,
+ "step": 2259
+ },
+ {
+ "epoch": 2.260635803819824,
+ "grad_norm": 0.5185582637786865,
+ "learning_rate": 2.8387179955206523e-05,
+ "loss": 0.6666,
+ "step": 2260
+ },
+ {
+ "epoch": 2.261636085148948,
+ "grad_norm": 0.5082037448883057,
+ "learning_rate": 2.831405503066932e-05,
+ "loss": 0.7377,
+ "step": 2261
+ },
+ {
+ "epoch": 2.262636366478072,
+ "grad_norm": 0.47630825638771057,
+ "learning_rate": 2.8241008876021215e-05,
+ "loss": 0.5947,
+ "step": 2262
+ },
+ {
+ "epoch": 2.263636647807196,
+ "grad_norm": 0.5042298436164856,
+ "learning_rate": 2.8168041571526805e-05,
+ "loss": 0.6501,
+ "step": 2263
+ },
+ {
+ "epoch": 2.2646369291363198,
+ "grad_norm": 0.4552183747291565,
+ "learning_rate": 2.8095153197363887e-05,
+ "loss": 0.5852,
+ "step": 2264
+ },
+ {
+ "epoch": 2.265637210465443,
+ "grad_norm": 0.4342525005340576,
+ "learning_rate": 2.8022343833623666e-05,
+ "loss": 0.5362,
+ "step": 2265
+ },
+ {
+ "epoch": 2.266637491794567,
+ "grad_norm": 0.4309101700782776,
+ "learning_rate": 2.7949613560310438e-05,
+ "loss": 0.4905,
+ "step": 2266
+ },
+ {
+ "epoch": 2.267637773123691,
+ "grad_norm": 0.5703599452972412,
+ "learning_rate": 2.787696245734155e-05,
+ "loss": 0.6974,
+ "step": 2267
+ },
+ {
+ "epoch": 2.268638054452815,
+ "grad_norm": 0.5007729530334473,
+ "learning_rate": 2.7804390604547557e-05,
+ "loss": 0.6958,
+ "step": 2268
+ },
+ {
+ "epoch": 2.2696383357819387,
+ "grad_norm": 0.47054824233055115,
+ "learning_rate": 2.7731898081671702e-05,
+ "loss": 0.5988,
+ "step": 2269
+ },
+ {
+ "epoch": 2.2706386171110626,
+ "grad_norm": 0.4500153362751007,
+ "learning_rate": 2.765948496837022e-05,
+ "loss": 0.6857,
+ "step": 2270
+ },
+ {
+ "epoch": 2.2716388984401865,
+ "grad_norm": 0.5590565204620361,
+ "learning_rate": 2.758715134421197e-05,
+ "loss": 0.6839,
+ "step": 2271
+ },
+ {
+ "epoch": 2.27263917976931,
+ "grad_norm": 0.486512690782547,
+ "learning_rate": 2.7514897288678578e-05,
+ "loss": 0.6154,
+ "step": 2272
+ },
+ {
+ "epoch": 2.273639461098434,
+ "grad_norm": 0.48422694206237793,
+ "learning_rate": 2.744272288116416e-05,
+ "loss": 0.6642,
+ "step": 2273
+ },
+ {
+ "epoch": 2.2746397424275577,
+ "grad_norm": 0.4691951870918274,
+ "learning_rate": 2.7370628200975302e-05,
+ "loss": 0.6612,
+ "step": 2274
+ },
+ {
+ "epoch": 2.2756400237566816,
+ "grad_norm": 0.4122920036315918,
+ "learning_rate": 2.729861332733108e-05,
+ "loss": 0.491,
+ "step": 2275
+ },
+ {
+ "epoch": 2.2766403050858055,
+ "grad_norm": 0.4303779602050781,
+ "learning_rate": 2.7226678339362755e-05,
+ "loss": 0.5108,
+ "step": 2276
+ },
+ {
+ "epoch": 2.2776405864149294,
+ "grad_norm": 0.45343050360679626,
+ "learning_rate": 2.7154823316113932e-05,
+ "loss": 0.5594,
+ "step": 2277
+ },
+ {
+ "epoch": 2.278640867744053,
+ "grad_norm": 0.4414820969104767,
+ "learning_rate": 2.708304833654023e-05,
+ "loss": 0.563,
+ "step": 2278
+ },
+ {
+ "epoch": 2.2796411490731767,
+ "grad_norm": 0.45219919085502625,
+ "learning_rate": 2.7011353479509426e-05,
+ "loss": 0.5469,
+ "step": 2279
+ },
+ {
+ "epoch": 2.2806414304023006,
+ "grad_norm": 0.4790736734867096,
+ "learning_rate": 2.693973882380114e-05,
+ "loss": 0.7291,
+ "step": 2280
+ },
+ {
+ "epoch": 2.2816417117314245,
+ "grad_norm": 0.4839097261428833,
+ "learning_rate": 2.686820444810696e-05,
+ "loss": 0.4779,
+ "step": 2281
+ },
+ {
+ "epoch": 2.2826419930605484,
+ "grad_norm": 0.4934631586074829,
+ "learning_rate": 2.679675043103026e-05,
+ "loss": 0.6304,
+ "step": 2282
+ },
+ {
+ "epoch": 2.2836422743896723,
+ "grad_norm": 0.5528481006622314,
+ "learning_rate": 2.6725376851086025e-05,
+ "loss": 0.7422,
+ "step": 2283
+ },
+ {
+ "epoch": 2.284642555718796,
+ "grad_norm": 0.45739200711250305,
+ "learning_rate": 2.6654083786700955e-05,
+ "loss": 0.5069,
+ "step": 2284
+ },
+ {
+ "epoch": 2.2856428370479196,
+ "grad_norm": 0.4885886013507843,
+ "learning_rate": 2.6582871316213198e-05,
+ "loss": 0.6197,
+ "step": 2285
+ },
+ {
+ "epoch": 2.2866431183770435,
+ "grad_norm": 0.518183171749115,
+ "learning_rate": 2.6511739517872426e-05,
+ "loss": 0.5722,
+ "step": 2286
+ },
+ {
+ "epoch": 2.2876433997061674,
+ "grad_norm": 0.4652218520641327,
+ "learning_rate": 2.644068846983956e-05,
+ "loss": 0.5164,
+ "step": 2287
+ },
+ {
+ "epoch": 2.2886436810352913,
+ "grad_norm": 0.48020297288894653,
+ "learning_rate": 2.6369718250186914e-05,
+ "loss": 0.6531,
+ "step": 2288
+ },
+ {
+ "epoch": 2.289643962364415,
+ "grad_norm": 0.386658251285553,
+ "learning_rate": 2.6298828936897867e-05,
+ "loss": 0.524,
+ "step": 2289
+ },
+ {
+ "epoch": 2.2906442436935386,
+ "grad_norm": 0.5391028523445129,
+ "learning_rate": 2.622802060786702e-05,
+ "loss": 0.741,
+ "step": 2290
+ },
+ {
+ "epoch": 2.2916445250226625,
+ "grad_norm": 0.44360673427581787,
+ "learning_rate": 2.6157293340899857e-05,
+ "loss": 0.5736,
+ "step": 2291
+ },
+ {
+ "epoch": 2.2926448063517864,
+ "grad_norm": 0.4551480710506439,
+ "learning_rate": 2.60866472137129e-05,
+ "loss": 0.5947,
+ "step": 2292
+ },
+ {
+ "epoch": 2.2936450876809102,
+ "grad_norm": 0.4541544020175934,
+ "learning_rate": 2.6016082303933454e-05,
+ "loss": 0.5172,
+ "step": 2293
+ },
+ {
+ "epoch": 2.294645369010034,
+ "grad_norm": 0.5024133920669556,
+ "learning_rate": 2.594559868909956e-05,
+ "loss": 0.6832,
+ "step": 2294
+ },
+ {
+ "epoch": 2.295645650339158,
+ "grad_norm": 0.5044113993644714,
+ "learning_rate": 2.587519644666001e-05,
+ "loss": 0.5062,
+ "step": 2295
+ },
+ {
+ "epoch": 2.296645931668282,
+ "grad_norm": 0.5235409736633301,
+ "learning_rate": 2.580487565397406e-05,
+ "loss": 0.6371,
+ "step": 2296
+ },
+ {
+ "epoch": 2.2976462129974053,
+ "grad_norm": 0.5046529769897461,
+ "learning_rate": 2.573463638831166e-05,
+ "loss": 0.5736,
+ "step": 2297
+ },
+ {
+ "epoch": 2.2986464943265292,
+ "grad_norm": 0.44103822112083435,
+ "learning_rate": 2.566447872685298e-05,
+ "loss": 0.6557,
+ "step": 2298
+ },
+ {
+ "epoch": 2.299646775655653,
+ "grad_norm": 0.5476238131523132,
+ "learning_rate": 2.559440274668864e-05,
+ "loss": 0.7338,
+ "step": 2299
+ },
+ {
+ "epoch": 2.300647056984777,
+ "grad_norm": 0.5168704986572266,
+ "learning_rate": 2.5524408524819453e-05,
+ "loss": 0.6034,
+ "step": 2300
+ },
+ {
+ "epoch": 2.301647338313901,
+ "grad_norm": 0.4194817543029785,
+ "learning_rate": 2.545449613815639e-05,
+ "loss": 0.56,
+ "step": 2301
+ },
+ {
+ "epoch": 2.302647619643025,
+ "grad_norm": 0.43994396924972534,
+ "learning_rate": 2.5384665663520558e-05,
+ "loss": 0.6744,
+ "step": 2302
+ },
+ {
+ "epoch": 2.303647900972148,
+ "grad_norm": 0.4471636116504669,
+ "learning_rate": 2.5314917177642972e-05,
+ "loss": 0.703,
+ "step": 2303
+ },
+ {
+ "epoch": 2.304648182301272,
+ "grad_norm": 0.48795682191848755,
+ "learning_rate": 2.5245250757164663e-05,
+ "loss": 0.7916,
+ "step": 2304
+ },
+ {
+ "epoch": 2.305648463630396,
+ "grad_norm": 0.5703234076499939,
+ "learning_rate": 2.5175666478636374e-05,
+ "loss": 0.7299,
+ "step": 2305
+ },
+ {
+ "epoch": 2.30664874495952,
+ "grad_norm": 0.532819926738739,
+ "learning_rate": 2.5106164418518686e-05,
+ "loss": 0.7058,
+ "step": 2306
+ },
+ {
+ "epoch": 2.3076490262886438,
+ "grad_norm": 0.4922640919685364,
+ "learning_rate": 2.5036744653181753e-05,
+ "loss": 0.708,
+ "step": 2307
+ },
+ {
+ "epoch": 2.3086493076177677,
+ "grad_norm": 0.4729764461517334,
+ "learning_rate": 2.4967407258905385e-05,
+ "loss": 0.6014,
+ "step": 2308
+ },
+ {
+ "epoch": 2.3096495889468915,
+ "grad_norm": 0.48904159665107727,
+ "learning_rate": 2.48981523118788e-05,
+ "loss": 0.6238,
+ "step": 2309
+ },
+ {
+ "epoch": 2.310649870276015,
+ "grad_norm": 0.49575427174568176,
+ "learning_rate": 2.4828979888200698e-05,
+ "loss": 0.7582,
+ "step": 2310
+ },
+ {
+ "epoch": 2.311650151605139,
+ "grad_norm": 0.5098155736923218,
+ "learning_rate": 2.475989006387901e-05,
+ "loss": 0.6547,
+ "step": 2311
+ },
+ {
+ "epoch": 2.3126504329342628,
+ "grad_norm": 0.45581546425819397,
+ "learning_rate": 2.4690882914831004e-05,
+ "loss": 0.6139,
+ "step": 2312
+ },
+ {
+ "epoch": 2.3136507142633866,
+ "grad_norm": 0.5907835960388184,
+ "learning_rate": 2.462195851688306e-05,
+ "loss": 0.5946,
+ "step": 2313
+ },
+ {
+ "epoch": 2.3146509955925105,
+ "grad_norm": 0.46699121594429016,
+ "learning_rate": 2.4553116945770583e-05,
+ "loss": 0.5983,
+ "step": 2314
+ },
+ {
+ "epoch": 2.315651276921634,
+ "grad_norm": 0.47327184677124023,
+ "learning_rate": 2.4484358277138065e-05,
+ "loss": 0.6508,
+ "step": 2315
+ },
+ {
+ "epoch": 2.316651558250758,
+ "grad_norm": 0.4823262095451355,
+ "learning_rate": 2.441568258653879e-05,
+ "loss": 0.6737,
+ "step": 2316
+ },
+ {
+ "epoch": 2.3176518395798817,
+ "grad_norm": 0.49834492802619934,
+ "learning_rate": 2.4347089949434988e-05,
+ "loss": 0.6298,
+ "step": 2317
+ },
+ {
+ "epoch": 2.3186521209090056,
+ "grad_norm": 0.49446576833724976,
+ "learning_rate": 2.4278580441197484e-05,
+ "loss": 0.659,
+ "step": 2318
+ },
+ {
+ "epoch": 2.3196524022381295,
+ "grad_norm": 0.4349921643733978,
+ "learning_rate": 2.421015413710591e-05,
+ "loss": 0.6368,
+ "step": 2319
+ },
+ {
+ "epoch": 2.3206526835672534,
+ "grad_norm": 0.6123060584068298,
+ "learning_rate": 2.4141811112348377e-05,
+ "loss": 0.6668,
+ "step": 2320
+ },
+ {
+ "epoch": 2.3216529648963773,
+ "grad_norm": 0.46083199977874756,
+ "learning_rate": 2.407355144202147e-05,
+ "loss": 0.6067,
+ "step": 2321
+ },
+ {
+ "epoch": 2.3226532462255007,
+ "grad_norm": 0.5320808291435242,
+ "learning_rate": 2.4005375201130274e-05,
+ "loss": 0.5295,
+ "step": 2322
+ },
+ {
+ "epoch": 2.3236535275546246,
+ "grad_norm": 0.616462767124176,
+ "learning_rate": 2.3937282464588108e-05,
+ "loss": 0.8035,
+ "step": 2323
+ },
+ {
+ "epoch": 2.3246538088837485,
+ "grad_norm": 0.5211688280105591,
+ "learning_rate": 2.3869273307216612e-05,
+ "loss": 0.7214,
+ "step": 2324
+ },
+ {
+ "epoch": 2.3256540902128724,
+ "grad_norm": 0.494314044713974,
+ "learning_rate": 2.3801347803745512e-05,
+ "loss": 0.7419,
+ "step": 2325
+ },
+ {
+ "epoch": 2.3266543715419963,
+ "grad_norm": 0.5524937510490417,
+ "learning_rate": 2.3733506028812658e-05,
+ "loss": 0.7203,
+ "step": 2326
+ },
+ {
+ "epoch": 2.32765465287112,
+ "grad_norm": 0.5332032442092896,
+ "learning_rate": 2.3665748056963956e-05,
+ "loss": 0.6084,
+ "step": 2327
+ },
+ {
+ "epoch": 2.3286549342002436,
+ "grad_norm": 0.49110063910484314,
+ "learning_rate": 2.3598073962653066e-05,
+ "loss": 0.5949,
+ "step": 2328
+ },
+ {
+ "epoch": 2.3296552155293675,
+ "grad_norm": 0.41060465574264526,
+ "learning_rate": 2.3530483820241656e-05,
+ "loss": 0.5049,
+ "step": 2329
+ },
+ {
+ "epoch": 2.3306554968584914,
+ "grad_norm": 0.4867851436138153,
+ "learning_rate": 2.3462977703999023e-05,
+ "loss": 0.6684,
+ "step": 2330
+ },
+ {
+ "epoch": 2.3316557781876153,
+ "grad_norm": 0.4757525622844696,
+ "learning_rate": 2.339555568810221e-05,
+ "loss": 0.4925,
+ "step": 2331
+ },
+ {
+ "epoch": 2.332656059516739,
+ "grad_norm": 0.4445713758468628,
+ "learning_rate": 2.332821784663578e-05,
+ "loss": 0.4718,
+ "step": 2332
+ },
+ {
+ "epoch": 2.333656340845863,
+ "grad_norm": 0.48554399609565735,
+ "learning_rate": 2.3260964253591898e-05,
+ "loss": 0.574,
+ "step": 2333
+ },
+ {
+ "epoch": 2.334656622174987,
+ "grad_norm": 0.5004045963287354,
+ "learning_rate": 2.3193794982870044e-05,
+ "loss": 0.5835,
+ "step": 2334
+ },
+ {
+ "epoch": 2.3356569035041104,
+ "grad_norm": 0.511870801448822,
+ "learning_rate": 2.312671010827715e-05,
+ "loss": 0.5677,
+ "step": 2335
+ },
+ {
+ "epoch": 2.3366571848332343,
+ "grad_norm": 0.4409622251987457,
+ "learning_rate": 2.30597097035273e-05,
+ "loss": 0.6213,
+ "step": 2336
+ },
+ {
+ "epoch": 2.337657466162358,
+ "grad_norm": 0.5014410614967346,
+ "learning_rate": 2.29927938422419e-05,
+ "loss": 0.5394,
+ "step": 2337
+ },
+ {
+ "epoch": 2.338657747491482,
+ "grad_norm": 0.572220504283905,
+ "learning_rate": 2.2925962597949302e-05,
+ "loss": 0.7122,
+ "step": 2338
+ },
+ {
+ "epoch": 2.339658028820606,
+ "grad_norm": 0.5024709105491638,
+ "learning_rate": 2.285921604408502e-05,
+ "loss": 0.6339,
+ "step": 2339
+ },
+ {
+ "epoch": 2.34065831014973,
+ "grad_norm": 0.5063747763633728,
+ "learning_rate": 2.2792554253991415e-05,
+ "loss": 0.6029,
+ "step": 2340
+ },
+ {
+ "epoch": 2.3416585914788532,
+ "grad_norm": 0.44128766655921936,
+ "learning_rate": 2.272597730091769e-05,
+ "loss": 0.522,
+ "step": 2341
+ },
+ {
+ "epoch": 2.342658872807977,
+ "grad_norm": 0.45375195145606995,
+ "learning_rate": 2.2659485258019976e-05,
+ "loss": 0.605,
+ "step": 2342
+ },
+ {
+ "epoch": 2.343659154137101,
+ "grad_norm": 0.4744661748409271,
+ "learning_rate": 2.259307819836093e-05,
+ "loss": 0.6479,
+ "step": 2343
+ },
+ {
+ "epoch": 2.344659435466225,
+ "grad_norm": 0.49682337045669556,
+ "learning_rate": 2.252675619490996e-05,
+ "loss": 0.6434,
+ "step": 2344
+ },
+ {
+ "epoch": 2.345659716795349,
+ "grad_norm": 0.5090720653533936,
+ "learning_rate": 2.2460519320542883e-05,
+ "loss": 0.6712,
+ "step": 2345
+ },
+ {
+ "epoch": 2.3466599981244727,
+ "grad_norm": 0.4929216206073761,
+ "learning_rate": 2.2394367648042102e-05,
+ "loss": 0.6301,
+ "step": 2346
+ },
+ {
+ "epoch": 2.3476602794535966,
+ "grad_norm": 0.45321589708328247,
+ "learning_rate": 2.2328301250096327e-05,
+ "loss": 0.5003,
+ "step": 2347
+ },
+ {
+ "epoch": 2.34866056078272,
+ "grad_norm": 0.5079351663589478,
+ "learning_rate": 2.2262320199300557e-05,
+ "loss": 0.5639,
+ "step": 2348
+ },
+ {
+ "epoch": 2.349660842111844,
+ "grad_norm": 0.459007203578949,
+ "learning_rate": 2.2196424568156073e-05,
+ "loss": 0.5999,
+ "step": 2349
+ },
+ {
+ "epoch": 2.350661123440968,
+ "grad_norm": 0.44086092710494995,
+ "learning_rate": 2.2130614429070207e-05,
+ "loss": 0.5651,
+ "step": 2350
+ },
+ {
+ "epoch": 2.3516614047700917,
+ "grad_norm": 0.4631021320819855,
+ "learning_rate": 2.206488985435645e-05,
+ "loss": 0.5437,
+ "step": 2351
+ },
+ {
+ "epoch": 2.3526616860992156,
+ "grad_norm": 0.5214501619338989,
+ "learning_rate": 2.199925091623418e-05,
+ "loss": 0.712,
+ "step": 2352
+ },
+ {
+ "epoch": 2.353661967428339,
+ "grad_norm": 0.4480469524860382,
+ "learning_rate": 2.193369768682877e-05,
+ "loss": 0.559,
+ "step": 2353
+ },
+ {
+ "epoch": 2.354662248757463,
+ "grad_norm": 0.48840999603271484,
+ "learning_rate": 2.1868230238171293e-05,
+ "loss": 0.5665,
+ "step": 2354
+ },
+ {
+ "epoch": 2.3556625300865868,
+ "grad_norm": 0.48083189129829407,
+ "learning_rate": 2.1802848642198692e-05,
+ "loss": 0.6433,
+ "step": 2355
+ },
+ {
+ "epoch": 2.3566628114157107,
+ "grad_norm": 0.5179978609085083,
+ "learning_rate": 2.1737552970753526e-05,
+ "loss": 0.6703,
+ "step": 2356
+ },
+ {
+ "epoch": 2.3576630927448345,
+ "grad_norm": 0.45892852544784546,
+ "learning_rate": 2.1672343295583873e-05,
+ "loss": 0.5417,
+ "step": 2357
+ },
+ {
+ "epoch": 2.3586633740739584,
+ "grad_norm": 0.5100318789482117,
+ "learning_rate": 2.160721968834344e-05,
+ "loss": 0.6418,
+ "step": 2358
+ },
+ {
+ "epoch": 2.3596636554030823,
+ "grad_norm": 0.5485228300094604,
+ "learning_rate": 2.154218222059122e-05,
+ "loss": 0.5995,
+ "step": 2359
+ },
+ {
+ "epoch": 2.3606639367322058,
+ "grad_norm": 0.4661252200603485,
+ "learning_rate": 2.1477230963791706e-05,
+ "loss": 0.5304,
+ "step": 2360
+ },
+ {
+ "epoch": 2.3616642180613296,
+ "grad_norm": 0.45510804653167725,
+ "learning_rate": 2.141236598931451e-05,
+ "loss": 0.5799,
+ "step": 2361
+ },
+ {
+ "epoch": 2.3626644993904535,
+ "grad_norm": 0.5123688578605652,
+ "learning_rate": 2.1347587368434575e-05,
+ "loss": 0.6454,
+ "step": 2362
+ },
+ {
+ "epoch": 2.3636647807195774,
+ "grad_norm": 0.4892440736293793,
+ "learning_rate": 2.1282895172331817e-05,
+ "loss": 0.6505,
+ "step": 2363
+ },
+ {
+ "epoch": 2.3646650620487013,
+ "grad_norm": 0.44063228368759155,
+ "learning_rate": 2.1218289472091336e-05,
+ "loss": 0.4452,
+ "step": 2364
+ },
+ {
+ "epoch": 2.365665343377825,
+ "grad_norm": 0.48182591795921326,
+ "learning_rate": 2.115377033870305e-05,
+ "loss": 0.5295,
+ "step": 2365
+ },
+ {
+ "epoch": 2.3666656247069486,
+ "grad_norm": 0.5330935716629028,
+ "learning_rate": 2.1089337843061863e-05,
+ "loss": 0.7468,
+ "step": 2366
+ },
+ {
+ "epoch": 2.3676659060360725,
+ "grad_norm": 0.4640701413154602,
+ "learning_rate": 2.102499205596743e-05,
+ "loss": 0.5936,
+ "step": 2367
+ },
+ {
+ "epoch": 2.3686661873651964,
+ "grad_norm": 0.40019840002059937,
+ "learning_rate": 2.0960733048124083e-05,
+ "loss": 0.489,
+ "step": 2368
+ },
+ {
+ "epoch": 2.3696664686943203,
+ "grad_norm": 0.5035619139671326,
+ "learning_rate": 2.0896560890140913e-05,
+ "loss": 0.6878,
+ "step": 2369
+ },
+ {
+ "epoch": 2.370666750023444,
+ "grad_norm": 0.46381524205207825,
+ "learning_rate": 2.0832475652531447e-05,
+ "loss": 0.603,
+ "step": 2370
+ },
+ {
+ "epoch": 2.371667031352568,
+ "grad_norm": 0.48167362809181213,
+ "learning_rate": 2.076847740571387e-05,
+ "loss": 0.4978,
+ "step": 2371
+ },
+ {
+ "epoch": 2.372667312681692,
+ "grad_norm": 0.5534481406211853,
+ "learning_rate": 2.070456622001059e-05,
+ "loss": 0.5736,
+ "step": 2372
+ },
+ {
+ "epoch": 2.3736675940108154,
+ "grad_norm": 0.47841575741767883,
+ "learning_rate": 2.064074216564852e-05,
+ "loss": 0.6201,
+ "step": 2373
+ },
+ {
+ "epoch": 2.3746678753399393,
+ "grad_norm": 0.5568225383758545,
+ "learning_rate": 2.0577005312758703e-05,
+ "loss": 0.7379,
+ "step": 2374
+ },
+ {
+ "epoch": 2.375668156669063,
+ "grad_norm": 0.5010125637054443,
+ "learning_rate": 2.0513355731376395e-05,
+ "loss": 0.6665,
+ "step": 2375
+ },
+ {
+ "epoch": 2.376668437998187,
+ "grad_norm": 0.5012779831886292,
+ "learning_rate": 2.0449793491441028e-05,
+ "loss": 0.6885,
+ "step": 2376
+ },
+ {
+ "epoch": 2.377668719327311,
+ "grad_norm": 0.4700705409049988,
+ "learning_rate": 2.0386318662795957e-05,
+ "loss": 0.7168,
+ "step": 2377
+ },
+ {
+ "epoch": 2.3786690006564344,
+ "grad_norm": 0.5078738927841187,
+ "learning_rate": 2.0322931315188586e-05,
+ "loss": 0.5534,
+ "step": 2378
+ },
+ {
+ "epoch": 2.3796692819855583,
+ "grad_norm": 0.4546999931335449,
+ "learning_rate": 2.0259631518270105e-05,
+ "loss": 0.6172,
+ "step": 2379
+ },
+ {
+ "epoch": 2.380669563314682,
+ "grad_norm": 0.4790453314781189,
+ "learning_rate": 2.0196419341595595e-05,
+ "loss": 0.5531,
+ "step": 2380
+ },
+ {
+ "epoch": 2.381669844643806,
+ "grad_norm": 0.4022303819656372,
+ "learning_rate": 2.013329485462374e-05,
+ "loss": 0.5742,
+ "step": 2381
+ },
+ {
+ "epoch": 2.38267012597293,
+ "grad_norm": 0.4902719557285309,
+ "learning_rate": 2.0070258126717e-05,
+ "loss": 0.6463,
+ "step": 2382
+ },
+ {
+ "epoch": 2.383670407302054,
+ "grad_norm": 0.4552217721939087,
+ "learning_rate": 2.000730922714128e-05,
+ "loss": 0.5703,
+ "step": 2383
+ },
+ {
+ "epoch": 2.3846706886311777,
+ "grad_norm": 0.5057043433189392,
+ "learning_rate": 1.9944448225066093e-05,
+ "loss": 0.6637,
+ "step": 2384
+ },
+ {
+ "epoch": 2.385670969960301,
+ "grad_norm": 0.5370767712593079,
+ "learning_rate": 1.9881675189564254e-05,
+ "loss": 0.6248,
+ "step": 2385
+ },
+ {
+ "epoch": 2.386671251289425,
+ "grad_norm": 0.5089964866638184,
+ "learning_rate": 1.981899018961202e-05,
+ "loss": 0.5206,
+ "step": 2386
+ },
+ {
+ "epoch": 2.387671532618549,
+ "grad_norm": 0.5276069641113281,
+ "learning_rate": 1.975639329408887e-05,
+ "loss": 0.5773,
+ "step": 2387
+ },
+ {
+ "epoch": 2.388671813947673,
+ "grad_norm": 0.4586690664291382,
+ "learning_rate": 1.9693884571777432e-05,
+ "loss": 0.4942,
+ "step": 2388
+ },
+ {
+ "epoch": 2.3896720952767967,
+ "grad_norm": 0.4275995194911957,
+ "learning_rate": 1.963146409136354e-05,
+ "loss": 0.5222,
+ "step": 2389
+ },
+ {
+ "epoch": 2.3906723766059206,
+ "grad_norm": 0.46232300996780396,
+ "learning_rate": 1.9569131921435956e-05,
+ "loss": 0.5835,
+ "step": 2390
+ },
+ {
+ "epoch": 2.391672657935044,
+ "grad_norm": 0.4702429175376892,
+ "learning_rate": 1.950688813048652e-05,
+ "loss": 0.5547,
+ "step": 2391
+ },
+ {
+ "epoch": 2.392672939264168,
+ "grad_norm": 0.45481425523757935,
+ "learning_rate": 1.944473278690986e-05,
+ "loss": 0.5783,
+ "step": 2392
+ },
+ {
+ "epoch": 2.393673220593292,
+ "grad_norm": 0.4588642120361328,
+ "learning_rate": 1.9382665959003477e-05,
+ "loss": 0.6411,
+ "step": 2393
+ },
+ {
+ "epoch": 2.3946735019224157,
+ "grad_norm": 0.4446251690387726,
+ "learning_rate": 1.93206877149676e-05,
+ "loss": 0.5831,
+ "step": 2394
+ },
+ {
+ "epoch": 2.3956737832515396,
+ "grad_norm": 0.48509830236434937,
+ "learning_rate": 1.9258798122905064e-05,
+ "loss": 0.5913,
+ "step": 2395
+ },
+ {
+ "epoch": 2.3966740645806635,
+ "grad_norm": 0.4828680753707886,
+ "learning_rate": 1.9196997250821392e-05,
+ "loss": 0.6255,
+ "step": 2396
+ },
+ {
+ "epoch": 2.3976743459097873,
+ "grad_norm": 0.5534887909889221,
+ "learning_rate": 1.913528516662452e-05,
+ "loss": 0.7288,
+ "step": 2397
+ },
+ {
+ "epoch": 2.398674627238911,
+ "grad_norm": 0.41673797369003296,
+ "learning_rate": 1.907366193812491e-05,
+ "loss": 0.5078,
+ "step": 2398
+ },
+ {
+ "epoch": 2.3996749085680347,
+ "grad_norm": 0.4955064356327057,
+ "learning_rate": 1.9012127633035305e-05,
+ "loss": 0.5843,
+ "step": 2399
+ },
+ {
+ "epoch": 2.4006751898971586,
+ "grad_norm": 0.6254858374595642,
+ "learning_rate": 1.895068231897079e-05,
+ "loss": 0.6269,
+ "step": 2400
+ },
+ {
+ "epoch": 2.4016754712262824,
+ "grad_norm": 0.5201045870780945,
+ "learning_rate": 1.8889326063448697e-05,
+ "loss": 0.521,
+ "step": 2401
+ },
+ {
+ "epoch": 2.4026757525554063,
+ "grad_norm": 0.49939653277397156,
+ "learning_rate": 1.8828058933888392e-05,
+ "loss": 0.718,
+ "step": 2402
+ },
+ {
+ "epoch": 2.40367603388453,
+ "grad_norm": 0.45866259932518005,
+ "learning_rate": 1.8766880997611424e-05,
+ "loss": 0.6565,
+ "step": 2403
+ },
+ {
+ "epoch": 2.4046763152136537,
+ "grad_norm": 0.5090838670730591,
+ "learning_rate": 1.870579232184122e-05,
+ "loss": 0.5909,
+ "step": 2404
+ },
+ {
+ "epoch": 2.4056765965427775,
+ "grad_norm": 0.4334961175918579,
+ "learning_rate": 1.864479297370325e-05,
+ "loss": 0.4925,
+ "step": 2405
+ },
+ {
+ "epoch": 2.4066768778719014,
+ "grad_norm": 0.5367956757545471,
+ "learning_rate": 1.8583883020224724e-05,
+ "loss": 0.6032,
+ "step": 2406
+ },
+ {
+ "epoch": 2.4076771592010253,
+ "grad_norm": 0.4732288420200348,
+ "learning_rate": 1.8523062528334688e-05,
+ "loss": 0.5782,
+ "step": 2407
+ },
+ {
+ "epoch": 2.408677440530149,
+ "grad_norm": 0.5187519788742065,
+ "learning_rate": 1.8462331564863832e-05,
+ "loss": 0.6937,
+ "step": 2408
+ },
+ {
+ "epoch": 2.409677721859273,
+ "grad_norm": 0.47051140666007996,
+ "learning_rate": 1.8401690196544552e-05,
+ "loss": 0.7286,
+ "step": 2409
+ },
+ {
+ "epoch": 2.410678003188397,
+ "grad_norm": 0.49937713146209717,
+ "learning_rate": 1.834113849001069e-05,
+ "loss": 0.6367,
+ "step": 2410
+ },
+ {
+ "epoch": 2.4116782845175204,
+ "grad_norm": 0.5458667278289795,
+ "learning_rate": 1.8280676511797666e-05,
+ "loss": 0.6091,
+ "step": 2411
+ },
+ {
+ "epoch": 2.4126785658466443,
+ "grad_norm": 0.5090888142585754,
+ "learning_rate": 1.8220304328342252e-05,
+ "loss": 0.659,
+ "step": 2412
+ },
+ {
+ "epoch": 2.413678847175768,
+ "grad_norm": 0.4376786947250366,
+ "learning_rate": 1.8160022005982515e-05,
+ "loss": 0.5746,
+ "step": 2413
+ },
+ {
+ "epoch": 2.414679128504892,
+ "grad_norm": 0.43062934279441833,
+ "learning_rate": 1.8099829610957863e-05,
+ "loss": 0.5639,
+ "step": 2414
+ },
+ {
+ "epoch": 2.415679409834016,
+ "grad_norm": 0.4858124256134033,
+ "learning_rate": 1.8039727209408842e-05,
+ "loss": 0.6354,
+ "step": 2415
+ },
+ {
+ "epoch": 2.4166796911631394,
+ "grad_norm": 0.49024826288223267,
+ "learning_rate": 1.7979714867377152e-05,
+ "loss": 0.5691,
+ "step": 2416
+ },
+ {
+ "epoch": 2.4176799724922633,
+ "grad_norm": 0.593974769115448,
+ "learning_rate": 1.7919792650805455e-05,
+ "loss": 0.621,
+ "step": 2417
+ },
+ {
+ "epoch": 2.418680253821387,
+ "grad_norm": 0.5197362303733826,
+ "learning_rate": 1.7859960625537476e-05,
+ "loss": 0.6257,
+ "step": 2418
+ },
+ {
+ "epoch": 2.419680535150511,
+ "grad_norm": 0.4285022020339966,
+ "learning_rate": 1.7800218857317742e-05,
+ "loss": 0.6256,
+ "step": 2419
+ },
+ {
+ "epoch": 2.420680816479635,
+ "grad_norm": 0.4791402816772461,
+ "learning_rate": 1.774056741179171e-05,
+ "loss": 0.4882,
+ "step": 2420
+ },
+ {
+ "epoch": 2.421681097808759,
+ "grad_norm": 0.4530814290046692,
+ "learning_rate": 1.7681006354505493e-05,
+ "loss": 0.5457,
+ "step": 2421
+ },
+ {
+ "epoch": 2.4226813791378827,
+ "grad_norm": 0.4875739812850952,
+ "learning_rate": 1.7621535750905905e-05,
+ "loss": 0.7153,
+ "step": 2422
+ },
+ {
+ "epoch": 2.423681660467006,
+ "grad_norm": 0.4448545575141907,
+ "learning_rate": 1.756215566634043e-05,
+ "loss": 0.5714,
+ "step": 2423
+ },
+ {
+ "epoch": 2.42468194179613,
+ "grad_norm": 0.4434129595756531,
+ "learning_rate": 1.7502866166056986e-05,
+ "loss": 0.6356,
+ "step": 2424
+ },
+ {
+ "epoch": 2.425682223125254,
+ "grad_norm": 0.48909735679626465,
+ "learning_rate": 1.744366731520408e-05,
+ "loss": 0.7804,
+ "step": 2425
+ },
+ {
+ "epoch": 2.426682504454378,
+ "grad_norm": 0.4361596703529358,
+ "learning_rate": 1.7384559178830472e-05,
+ "loss": 0.6193,
+ "step": 2426
+ },
+ {
+ "epoch": 2.4276827857835017,
+ "grad_norm": 0.48339372873306274,
+ "learning_rate": 1.7325541821885384e-05,
+ "loss": 0.6213,
+ "step": 2427
+ },
+ {
+ "epoch": 2.4286830671126256,
+ "grad_norm": 0.5264155864715576,
+ "learning_rate": 1.726661530921815e-05,
+ "loss": 0.7486,
+ "step": 2428
+ },
+ {
+ "epoch": 2.429683348441749,
+ "grad_norm": 0.5361571311950684,
+ "learning_rate": 1.7207779705578375e-05,
+ "loss": 0.7603,
+ "step": 2429
+ },
+ {
+ "epoch": 2.430683629770873,
+ "grad_norm": 0.42906150221824646,
+ "learning_rate": 1.7149035075615794e-05,
+ "loss": 0.5387,
+ "step": 2430
+ },
+ {
+ "epoch": 2.431683911099997,
+ "grad_norm": 0.4638700783252716,
+ "learning_rate": 1.709038148388007e-05,
+ "loss": 0.5149,
+ "step": 2431
+ },
+ {
+ "epoch": 2.4326841924291207,
+ "grad_norm": 0.5149651765823364,
+ "learning_rate": 1.7031818994820926e-05,
+ "loss": 0.7173,
+ "step": 2432
+ },
+ {
+ "epoch": 2.4336844737582446,
+ "grad_norm": 0.48855680227279663,
+ "learning_rate": 1.697334767278792e-05,
+ "loss": 0.649,
+ "step": 2433
+ },
+ {
+ "epoch": 2.4346847550873685,
+ "grad_norm": 0.4574027955532074,
+ "learning_rate": 1.6914967582030493e-05,
+ "loss": 0.5281,
+ "step": 2434
+ },
+ {
+ "epoch": 2.4356850364164924,
+ "grad_norm": 0.49395766854286194,
+ "learning_rate": 1.6856678786697778e-05,
+ "loss": 0.5188,
+ "step": 2435
+ },
+ {
+ "epoch": 2.436685317745616,
+ "grad_norm": 0.4664051830768585,
+ "learning_rate": 1.6798481350838648e-05,
+ "loss": 0.6979,
+ "step": 2436
+ },
+ {
+ "epoch": 2.4376855990747397,
+ "grad_norm": 0.4599386751651764,
+ "learning_rate": 1.6740375338401526e-05,
+ "loss": 0.5938,
+ "step": 2437
+ },
+ {
+ "epoch": 2.4386858804038636,
+ "grad_norm": 0.45751938223838806,
+ "learning_rate": 1.6682360813234444e-05,
+ "loss": 0.5343,
+ "step": 2438
+ },
+ {
+ "epoch": 2.4396861617329875,
+ "grad_norm": 0.48478764295578003,
+ "learning_rate": 1.6624437839084862e-05,
+ "loss": 0.6195,
+ "step": 2439
+ },
+ {
+ "epoch": 2.4406864430621114,
+ "grad_norm": 0.46098843216896057,
+ "learning_rate": 1.656660647959962e-05,
+ "loss": 0.5366,
+ "step": 2440
+ },
+ {
+ "epoch": 2.441686724391235,
+ "grad_norm": 0.5450953245162964,
+ "learning_rate": 1.6508866798324986e-05,
+ "loss": 0.5466,
+ "step": 2441
+ },
+ {
+ "epoch": 2.4426870057203587,
+ "grad_norm": 0.5364235639572144,
+ "learning_rate": 1.6451218858706374e-05,
+ "loss": 0.7062,
+ "step": 2442
+ },
+ {
+ "epoch": 2.4436872870494826,
+ "grad_norm": 0.5759331583976746,
+ "learning_rate": 1.6393662724088478e-05,
+ "loss": 0.6879,
+ "step": 2443
+ },
+ {
+ "epoch": 2.4446875683786065,
+ "grad_norm": 0.5829169154167175,
+ "learning_rate": 1.633619845771501e-05,
+ "loss": 0.5463,
+ "step": 2444
+ },
+ {
+ "epoch": 2.4456878497077303,
+ "grad_norm": 0.4662203788757324,
+ "learning_rate": 1.627882612272893e-05,
+ "loss": 0.5994,
+ "step": 2445
+ },
+ {
+ "epoch": 2.4466881310368542,
+ "grad_norm": 0.5085203647613525,
+ "learning_rate": 1.622154578217199e-05,
+ "loss": 0.654,
+ "step": 2446
+ },
+ {
+ "epoch": 2.447688412365978,
+ "grad_norm": 0.4459596872329712,
+ "learning_rate": 1.6164357498984893e-05,
+ "loss": 0.4425,
+ "step": 2447
+ },
+ {
+ "epoch": 2.4486886936951016,
+ "grad_norm": 0.5450780987739563,
+ "learning_rate": 1.6107261336007285e-05,
+ "loss": 0.6722,
+ "step": 2448
+ },
+ {
+ "epoch": 2.4496889750242254,
+ "grad_norm": 0.4960186779499054,
+ "learning_rate": 1.605025735597746e-05,
+ "loss": 0.5617,
+ "step": 2449
+ },
+ {
+ "epoch": 2.4506892563533493,
+ "grad_norm": 0.49519863724708557,
+ "learning_rate": 1.599334562153254e-05,
+ "loss": 0.5659,
+ "step": 2450
+ },
+ {
+ "epoch": 2.451689537682473,
+ "grad_norm": 0.42490801215171814,
+ "learning_rate": 1.593652619520819e-05,
+ "loss": 0.5917,
+ "step": 2451
+ },
+ {
+ "epoch": 2.452689819011597,
+ "grad_norm": 0.42814430594444275,
+ "learning_rate": 1.587979913943871e-05,
+ "loss": 0.5453,
+ "step": 2452
+ },
+ {
+ "epoch": 2.453690100340721,
+ "grad_norm": 0.40873077511787415,
+ "learning_rate": 1.5823164516556842e-05,
+ "loss": 0.519,
+ "step": 2453
+ },
+ {
+ "epoch": 2.4546903816698444,
+ "grad_norm": 0.46831750869750977,
+ "learning_rate": 1.5766622388793838e-05,
+ "loss": 0.6087,
+ "step": 2454
+ },
+ {
+ "epoch": 2.4556906629989683,
+ "grad_norm": 0.47180086374282837,
+ "learning_rate": 1.5710172818279222e-05,
+ "loss": 0.5621,
+ "step": 2455
+ },
+ {
+ "epoch": 2.456690944328092,
+ "grad_norm": 0.4417397081851959,
+ "learning_rate": 1.5653815867040923e-05,
+ "loss": 0.588,
+ "step": 2456
+ },
+ {
+ "epoch": 2.457691225657216,
+ "grad_norm": 0.47052255272865295,
+ "learning_rate": 1.5597551597004966e-05,
+ "loss": 0.5726,
+ "step": 2457
+ },
+ {
+ "epoch": 2.45869150698634,
+ "grad_norm": 0.5345332622528076,
+ "learning_rate": 1.554138006999568e-05,
+ "loss": 0.7633,
+ "step": 2458
+ },
+ {
+ "epoch": 2.459691788315464,
+ "grad_norm": 0.4659261703491211,
+ "learning_rate": 1.5485301347735348e-05,
+ "loss": 0.6815,
+ "step": 2459
+ },
+ {
+ "epoch": 2.4606920696445878,
+ "grad_norm": 0.4760098159313202,
+ "learning_rate": 1.5429315491844388e-05,
+ "loss": 0.502,
+ "step": 2460
+ },
+ {
+ "epoch": 2.461692350973711,
+ "grad_norm": 0.4500744044780731,
+ "learning_rate": 1.5373422563841133e-05,
+ "loss": 0.5937,
+ "step": 2461
+ },
+ {
+ "epoch": 2.462692632302835,
+ "grad_norm": 0.4707466661930084,
+ "learning_rate": 1.531762262514177e-05,
+ "loss": 0.5163,
+ "step": 2462
+ },
+ {
+ "epoch": 2.463692913631959,
+ "grad_norm": 0.4404618740081787,
+ "learning_rate": 1.5261915737060384e-05,
+ "loss": 0.6068,
+ "step": 2463
+ },
+ {
+ "epoch": 2.464693194961083,
+ "grad_norm": 0.44543537497520447,
+ "learning_rate": 1.5206301960808722e-05,
+ "loss": 0.5419,
+ "step": 2464
+ },
+ {
+ "epoch": 2.4656934762902067,
+ "grad_norm": 0.4371756911277771,
+ "learning_rate": 1.5150781357496314e-05,
+ "loss": 0.5073,
+ "step": 2465
+ },
+ {
+ "epoch": 2.4666937576193306,
+ "grad_norm": 0.4547995328903198,
+ "learning_rate": 1.5095353988130235e-05,
+ "loss": 0.5694,
+ "step": 2466
+ },
+ {
+ "epoch": 2.467694038948454,
+ "grad_norm": 0.45245441794395447,
+ "learning_rate": 1.5040019913615123e-05,
+ "loss": 0.5318,
+ "step": 2467
+ },
+ {
+ "epoch": 2.468694320277578,
+ "grad_norm": 0.5516065359115601,
+ "learning_rate": 1.4984779194753151e-05,
+ "loss": 0.7389,
+ "step": 2468
+ },
+ {
+ "epoch": 2.469694601606702,
+ "grad_norm": 0.5618095993995667,
+ "learning_rate": 1.4929631892243856e-05,
+ "loss": 0.8326,
+ "step": 2469
+ },
+ {
+ "epoch": 2.4706948829358257,
+ "grad_norm": 0.5322582125663757,
+ "learning_rate": 1.4874578066684186e-05,
+ "loss": 0.6577,
+ "step": 2470
+ },
+ {
+ "epoch": 2.4716951642649496,
+ "grad_norm": 0.4064349830150604,
+ "learning_rate": 1.4819617778568285e-05,
+ "loss": 0.545,
+ "step": 2471
+ },
+ {
+ "epoch": 2.4726954455940735,
+ "grad_norm": 0.4223059415817261,
+ "learning_rate": 1.476475108828762e-05,
+ "loss": 0.6278,
+ "step": 2472
+ },
+ {
+ "epoch": 2.4736957269231974,
+ "grad_norm": 0.4212653934955597,
+ "learning_rate": 1.4709978056130713e-05,
+ "loss": 0.6044,
+ "step": 2473
+ },
+ {
+ "epoch": 2.474696008252321,
+ "grad_norm": 0.479046106338501,
+ "learning_rate": 1.4655298742283252e-05,
+ "loss": 0.5589,
+ "step": 2474
+ },
+ {
+ "epoch": 2.4756962895814447,
+ "grad_norm": 0.41649335622787476,
+ "learning_rate": 1.4600713206827932e-05,
+ "loss": 0.5048,
+ "step": 2475
+ },
+ {
+ "epoch": 2.4766965709105686,
+ "grad_norm": 0.41688817739486694,
+ "learning_rate": 1.454622150974434e-05,
+ "loss": 0.5926,
+ "step": 2476
+ },
+ {
+ "epoch": 2.4776968522396925,
+ "grad_norm": 0.5202938914299011,
+ "learning_rate": 1.4491823710909047e-05,
+ "loss": 0.666,
+ "step": 2477
+ },
+ {
+ "epoch": 2.4786971335688164,
+ "grad_norm": 0.5899435877799988,
+ "learning_rate": 1.4437519870095329e-05,
+ "loss": 0.6623,
+ "step": 2478
+ },
+ {
+ "epoch": 2.47969741489794,
+ "grad_norm": 0.43830054998397827,
+ "learning_rate": 1.4383310046973365e-05,
+ "loss": 0.4251,
+ "step": 2479
+ },
+ {
+ "epoch": 2.4806976962270637,
+ "grad_norm": 0.509669840335846,
+ "learning_rate": 1.4329194301109872e-05,
+ "loss": 0.6183,
+ "step": 2480
+ },
+ {
+ "epoch": 2.4816979775561876,
+ "grad_norm": 0.5677187442779541,
+ "learning_rate": 1.427517269196833e-05,
+ "loss": 0.5949,
+ "step": 2481
+ },
+ {
+ "epoch": 2.4826982588853115,
+ "grad_norm": 0.4813043773174286,
+ "learning_rate": 1.4221245278908668e-05,
+ "loss": 0.6929,
+ "step": 2482
+ },
+ {
+ "epoch": 2.4836985402144354,
+ "grad_norm": 0.4711589217185974,
+ "learning_rate": 1.4167412121187406e-05,
+ "loss": 0.6191,
+ "step": 2483
+ },
+ {
+ "epoch": 2.4846988215435593,
+ "grad_norm": 0.4879576861858368,
+ "learning_rate": 1.4113673277957395e-05,
+ "loss": 0.5817,
+ "step": 2484
+ },
+ {
+ "epoch": 2.485699102872683,
+ "grad_norm": 0.5340747833251953,
+ "learning_rate": 1.4060028808267967e-05,
+ "loss": 0.6396,
+ "step": 2485
+ },
+ {
+ "epoch": 2.4866993842018066,
+ "grad_norm": 0.655983567237854,
+ "learning_rate": 1.4006478771064646e-05,
+ "loss": 0.6395,
+ "step": 2486
+ },
+ {
+ "epoch": 2.4876996655309305,
+ "grad_norm": 0.44405293464660645,
+ "learning_rate": 1.3953023225189243e-05,
+ "loss": 0.5643,
+ "step": 2487
+ },
+ {
+ "epoch": 2.4886999468600544,
+ "grad_norm": 0.4928829073905945,
+ "learning_rate": 1.389966222937974e-05,
+ "loss": 0.5402,
+ "step": 2488
+ },
+ {
+ "epoch": 2.4897002281891782,
+ "grad_norm": 0.3877166509628296,
+ "learning_rate": 1.3846395842270232e-05,
+ "loss": 0.5906,
+ "step": 2489
+ },
+ {
+ "epoch": 2.490700509518302,
+ "grad_norm": 0.5130916237831116,
+ "learning_rate": 1.3793224122390858e-05,
+ "loss": 0.711,
+ "step": 2490
+ },
+ {
+ "epoch": 2.491700790847426,
+ "grad_norm": 0.5104362368583679,
+ "learning_rate": 1.374014712816768e-05,
+ "loss": 0.634,
+ "step": 2491
+ },
+ {
+ "epoch": 2.4927010721765495,
+ "grad_norm": 0.5256757736206055,
+ "learning_rate": 1.3687164917922768e-05,
+ "loss": 0.7387,
+ "step": 2492
+ },
+ {
+ "epoch": 2.4937013535056733,
+ "grad_norm": 0.5370634198188782,
+ "learning_rate": 1.3634277549873953e-05,
+ "loss": 0.6866,
+ "step": 2493
+ },
+ {
+ "epoch": 2.4947016348347972,
+ "grad_norm": 0.45995843410491943,
+ "learning_rate": 1.3581485082134882e-05,
+ "loss": 0.4967,
+ "step": 2494
+ },
+ {
+ "epoch": 2.495701916163921,
+ "grad_norm": 0.5517768263816833,
+ "learning_rate": 1.3528787572714952e-05,
+ "loss": 0.6128,
+ "step": 2495
+ },
+ {
+ "epoch": 2.496702197493045,
+ "grad_norm": 0.45371124148368835,
+ "learning_rate": 1.3476185079519177e-05,
+ "loss": 0.6992,
+ "step": 2496
+ },
+ {
+ "epoch": 2.497702478822169,
+ "grad_norm": 0.5358415842056274,
+ "learning_rate": 1.342367766034821e-05,
+ "loss": 0.6828,
+ "step": 2497
+ },
+ {
+ "epoch": 2.498702760151293,
+ "grad_norm": 0.5540277361869812,
+ "learning_rate": 1.3371265372898167e-05,
+ "loss": 0.7249,
+ "step": 2498
+ },
+ {
+ "epoch": 2.499703041480416,
+ "grad_norm": 0.4393683671951294,
+ "learning_rate": 1.3318948274760734e-05,
+ "loss": 0.5943,
+ "step": 2499
+ },
+ {
+ "epoch": 2.50070332280954,
+ "grad_norm": 0.4958156645298004,
+ "learning_rate": 1.326672642342287e-05,
+ "loss": 0.6252,
+ "step": 2500
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 2997,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.618165860313989e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-2500/training_args.bin b/checkpoint-2500/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f5db54c03213459099040d14f85829b6aeb0666
--- /dev/null
+++ b/checkpoint-2500/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb43d78443117126e44061cb7a0c1f9a5c40f27f7bf1d5cd0232587a4334407
+size 5304
diff --git a/checkpoint-2997/config.json b/checkpoint-2997/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..06ec1df58f28234ccce22a5325e108ece94f0078
--- /dev/null
+++ b/checkpoint-2997/config.json
@@ -0,0 +1,34 @@
+{
+ "_name_or_path": "facebook/nllb-200-3.3B",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "M2M100ForConditionalGeneration"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 0,
+ "d_model": 2048,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 8192,
+ "decoder_layerdrop": 0,
+ "decoder_layers": 24,
+ "decoder_start_token_id": 2,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 8192,
+ "encoder_layerdrop": 0,
+ "encoder_layers": 24,
+ "eos_token_id": 2,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 200,
+ "max_position_embeddings": 1024,
+ "model_type": "m2m_100",
+ "num_hidden_layers": 24,
+ "pad_token_id": 1,
+ "scale_embedding": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.43.2",
+ "use_cache": true,
+ "vocab_size": 256206
+}
diff --git a/checkpoint-2997/generation_config.json b/checkpoint-2997/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..402a1a43d1af8c080466b8139184b4e5b7f3f47c
--- /dev/null
+++ b/checkpoint-2997/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "decoder_start_token_id": 2,
+ "eos_token_id": 2,
+ "max_length": 200,
+ "pad_token_id": 1,
+ "transformers_version": "4.43.2"
+}
diff --git a/checkpoint-2997/model-00001-of-00003.safetensors b/checkpoint-2997/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4a67ce8ab6e998c86542b60f2f7df17a9b43e21d
--- /dev/null
+++ b/checkpoint-2997/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c369f48abbcd62a29748ea19ce35e34ebc1ad9122ed611d55ad8034aa3915a72
+size 4986088344
diff --git a/checkpoint-2997/model-00002-of-00003.safetensors b/checkpoint-2997/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8e5af793ff049e38c845933302050973b630c869
--- /dev/null
+++ b/checkpoint-2997/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51551c8055bc83d503c48a85e3b723a1498d3d9c07fb0699ce0cb2de8eaf7480
+size 4985688360
diff --git a/checkpoint-2997/model-00003-of-00003.safetensors b/checkpoint-2997/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..62bb5e411f7955071f32c12ed19ad8a8808b3fee
--- /dev/null
+++ b/checkpoint-2997/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ae91973f87c9a5cb7ffd3e1a35b0db4ef04262aba76c923e1a2550582678ee1
+size 3407796744
diff --git a/checkpoint-2997/model.safetensors.index.json b/checkpoint-2997/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..66f5db79b23230955de24502c00adc6525edbdfc
--- /dev/null
+++ b/checkpoint-2997/model.safetensors.index.json
@@ -0,0 +1,1020 @@
+{
+ "metadata": {
+ "total_size": 13379452928
+ },
+ "weight_map": {
+ "model.decoder.layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.shared.weight": "model-00001-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-2997/optimizer.pt b/checkpoint-2997/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2ad7c471d249d6f114279e712017e2ba39427959
--- /dev/null
+++ b/checkpoint-2997/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47dab79becb582851cf1dd57a8ea06b445abb587a4bef61ad00261274d0c2c55
+size 16695613
diff --git a/checkpoint-2997/rng_state.pth b/checkpoint-2997/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f6d6feec7ef7e2e9d9768f06f898604f36e9e24a
--- /dev/null
+++ b/checkpoint-2997/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a64f34286243934087e7b7f69307e1c4ef2178683952385fa52b8a871013d0b
+size 14244
diff --git a/checkpoint-2997/scheduler.pt b/checkpoint-2997/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c8d7a14e843b88d04e82adecafb2059c0889c841
--- /dev/null
+++ b/checkpoint-2997/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:283f56cb64358dcfe445dadd782a570e2d61256c4354ad9314a950651aa88c8d
+size 1064
diff --git a/checkpoint-2997/sentencepiece.bpe.model b/checkpoint-2997/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..dc2262d3e1d375b235eb71c24119c8e73f85d4ad
--- /dev/null
+++ b/checkpoint-2997/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
+size 4852054
diff --git a/checkpoint-2997/special_tokens_map.json b/checkpoint-2997/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..770c6f4e25faf27bbc3878b806f2ecfb88c5169e
--- /dev/null
+++ b/checkpoint-2997/special_tokens_map.json
@@ -0,0 +1,255 @@
+{
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-2997/tokenizer.json b/checkpoint-2997/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..98050e98b98364c06d83b3f41864076220cb8408
--- /dev/null
+++ b/checkpoint-2997/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b39b25b0763a1dd69dec54081fafcf10770d9f2538a3bd975a0c4be6d60a9c2
+size 17331294
diff --git a/checkpoint-2997/tokenizer_config.json b/checkpoint-2997/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f1424d3657c008568198b44be241646482e7e9f2
--- /dev/null
+++ b/checkpoint-2997/tokenizer_config.json
@@ -0,0 +1,1878 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256001": {
+ "content": "ace_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256002": {
+ "content": "ace_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256003": {
+ "content": "acm_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256004": {
+ "content": "acq_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256005": {
+ "content": "aeb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256006": {
+ "content": "afr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256007": {
+ "content": "ajp_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256008": {
+ "content": "aka_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256009": {
+ "content": "amh_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256010": {
+ "content": "apc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256011": {
+ "content": "arb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256012": {
+ "content": "ars_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256013": {
+ "content": "ary_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256014": {
+ "content": "arz_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256015": {
+ "content": "asm_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256016": {
+ "content": "ast_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256017": {
+ "content": "awa_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256018": {
+ "content": "ayr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256019": {
+ "content": "azb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256020": {
+ "content": "azj_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256021": {
+ "content": "bak_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256022": {
+ "content": "bam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256023": {
+ "content": "ban_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256024": {
+ "content": "bel_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256025": {
+ "content": "bem_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256026": {
+ "content": "ben_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256027": {
+ "content": "bho_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256028": {
+ "content": "bjn_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256029": {
+ "content": "bjn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256030": {
+ "content": "bod_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256031": {
+ "content": "bos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256032": {
+ "content": "bug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256033": {
+ "content": "bul_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256034": {
+ "content": "cat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256035": {
+ "content": "ceb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256036": {
+ "content": "ces_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256037": {
+ "content": "cjk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256038": {
+ "content": "ckb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256039": {
+ "content": "crh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256040": {
+ "content": "cym_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256041": {
+ "content": "dan_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256042": {
+ "content": "deu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256043": {
+ "content": "dik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256044": {
+ "content": "dyu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256045": {
+ "content": "dzo_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256046": {
+ "content": "ell_Grek",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256047": {
+ "content": "eng_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256048": {
+ "content": "epo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256049": {
+ "content": "est_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256050": {
+ "content": "eus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256051": {
+ "content": "ewe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256052": {
+ "content": "fao_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256053": {
+ "content": "pes_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256054": {
+ "content": "fij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256055": {
+ "content": "fin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256056": {
+ "content": "fon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256057": {
+ "content": "fra_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256058": {
+ "content": "fur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256059": {
+ "content": "fuv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256060": {
+ "content": "gla_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256061": {
+ "content": "gle_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256062": {
+ "content": "glg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256063": {
+ "content": "grn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256064": {
+ "content": "guj_Gujr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256065": {
+ "content": "hat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256066": {
+ "content": "hau_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256067": {
+ "content": "heb_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256068": {
+ "content": "hin_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256069": {
+ "content": "hne_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256070": {
+ "content": "hrv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256071": {
+ "content": "hun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256072": {
+ "content": "hye_Armn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256073": {
+ "content": "ibo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256074": {
+ "content": "ilo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256075": {
+ "content": "ind_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256076": {
+ "content": "isl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256077": {
+ "content": "ita_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256078": {
+ "content": "jav_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256079": {
+ "content": "jpn_Jpan",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256080": {
+ "content": "kab_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256081": {
+ "content": "kac_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256082": {
+ "content": "kam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256083": {
+ "content": "kan_Knda",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256084": {
+ "content": "kas_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256085": {
+ "content": "kas_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256086": {
+ "content": "kat_Geor",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256087": {
+ "content": "knc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256088": {
+ "content": "knc_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256089": {
+ "content": "kaz_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256090": {
+ "content": "kbp_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256091": {
+ "content": "kea_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256092": {
+ "content": "khm_Khmr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256093": {
+ "content": "kik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256094": {
+ "content": "kin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256095": {
+ "content": "kir_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256096": {
+ "content": "kmb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256097": {
+ "content": "kon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256098": {
+ "content": "kor_Hang",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256099": {
+ "content": "kmr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256100": {
+ "content": "lao_Laoo",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256101": {
+ "content": "lvs_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256102": {
+ "content": "lij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256103": {
+ "content": "lim_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256104": {
+ "content": "lin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256105": {
+ "content": "lit_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256106": {
+ "content": "lmo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256107": {
+ "content": "ltg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256108": {
+ "content": "ltz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256109": {
+ "content": "lua_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256110": {
+ "content": "lug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256111": {
+ "content": "luo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256112": {
+ "content": "lus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256113": {
+ "content": "mag_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256114": {
+ "content": "mai_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256115": {
+ "content": "mal_Mlym",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256116": {
+ "content": "mar_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256117": {
+ "content": "min_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256118": {
+ "content": "mkd_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256119": {
+ "content": "plt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256120": {
+ "content": "mlt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256121": {
+ "content": "mni_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256122": {
+ "content": "khk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256123": {
+ "content": "mos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256124": {
+ "content": "mri_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256125": {
+ "content": "zsm_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256126": {
+ "content": "mya_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256127": {
+ "content": "nld_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256128": {
+ "content": "nno_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256129": {
+ "content": "nob_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256130": {
+ "content": "npi_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256131": {
+ "content": "nso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256132": {
+ "content": "nus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256133": {
+ "content": "nya_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256134": {
+ "content": "oci_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256135": {
+ "content": "gaz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256136": {
+ "content": "ory_Orya",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256137": {
+ "content": "pag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256138": {
+ "content": "pan_Guru",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256139": {
+ "content": "pap_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256140": {
+ "content": "pol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256141": {
+ "content": "por_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256142": {
+ "content": "prs_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256143": {
+ "content": "pbt_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256144": {
+ "content": "quy_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256145": {
+ "content": "ron_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256146": {
+ "content": "run_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256147": {
+ "content": "rus_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256148": {
+ "content": "sag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256149": {
+ "content": "san_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256150": {
+ "content": "sat_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256151": {
+ "content": "scn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256152": {
+ "content": "shn_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256153": {
+ "content": "sin_Sinh",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256154": {
+ "content": "slk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256155": {
+ "content": "slv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256156": {
+ "content": "smo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256157": {
+ "content": "sna_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256158": {
+ "content": "snd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256159": {
+ "content": "som_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256160": {
+ "content": "sot_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256161": {
+ "content": "spa_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256162": {
+ "content": "als_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256163": {
+ "content": "srd_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256164": {
+ "content": "srp_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256165": {
+ "content": "ssw_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256166": {
+ "content": "sun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256167": {
+ "content": "swe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256168": {
+ "content": "swh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256169": {
+ "content": "szl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256170": {
+ "content": "tam_Taml",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256171": {
+ "content": "tat_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256172": {
+ "content": "tel_Telu",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256173": {
+ "content": "tgk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256174": {
+ "content": "tgl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256175": {
+ "content": "tha_Thai",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256176": {
+ "content": "tir_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256177": {
+ "content": "taq_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256178": {
+ "content": "taq_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256179": {
+ "content": "tpi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256180": {
+ "content": "tsn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256181": {
+ "content": "tso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256182": {
+ "content": "tuk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256183": {
+ "content": "tum_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256184": {
+ "content": "tur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256185": {
+ "content": "twi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256186": {
+ "content": "tzm_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256187": {
+ "content": "uig_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256188": {
+ "content": "ukr_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256189": {
+ "content": "umb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256190": {
+ "content": "urd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256191": {
+ "content": "uzn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256192": {
+ "content": "vec_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256193": {
+ "content": "vie_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256194": {
+ "content": "war_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256195": {
+ "content": "wol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256196": {
+ "content": "xho_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256197": {
+ "content": "ydd_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256198": {
+ "content": "yor_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256199": {
+ "content": "yue_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256200": {
+ "content": "zho_Hans",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256201": {
+ "content": "zho_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256202": {
+ "content": "zul_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256203": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "legacy_behaviour": false,
+ "mask_token": "",
+ "model_max_length": 1024,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "src_lang": "eng_Latn",
+ "tgt_lang": null,
+ "tokenizer_class": "NllbTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-2997/trainer_state.json b/checkpoint-2997/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..23794e6999d8818c5b918f72eef72044fa5a6dbb
--- /dev/null
+++ b/checkpoint-2997/trainer_state.json
@@ -0,0 +1,21012 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.9978431433840766,
+ "eval_steps": 500,
+ "global_step": 2997,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001000281329123816,
+ "grad_norm": 5.902005195617676,
+ "learning_rate": 0.0001999999450590425,
+ "loss": 3.1875,
+ "step": 1
+ },
+ {
+ "epoch": 0.002000562658247632,
+ "grad_norm": 3.2577760219573975,
+ "learning_rate": 0.00019999978023623033,
+ "loss": 2.3666,
+ "step": 2
+ },
+ {
+ "epoch": 0.003000843987371448,
+ "grad_norm": 5.3700995445251465,
+ "learning_rate": 0.0001999995055317446,
+ "loss": 2.8282,
+ "step": 3
+ },
+ {
+ "epoch": 0.004001125316495264,
+ "grad_norm": 2.1445534229278564,
+ "learning_rate": 0.00019999912094588717,
+ "loss": 2.2322,
+ "step": 4
+ },
+ {
+ "epoch": 0.005001406645619081,
+ "grad_norm": 1.5143821239471436,
+ "learning_rate": 0.00019999862647908064,
+ "loss": 2.1709,
+ "step": 5
+ },
+ {
+ "epoch": 0.006001687974742896,
+ "grad_norm": 2.0491714477539062,
+ "learning_rate": 0.00019999802213186834,
+ "loss": 2.2863,
+ "step": 6
+ },
+ {
+ "epoch": 0.007001969303866712,
+ "grad_norm": 1.2016857862472534,
+ "learning_rate": 0.0001999973079049143,
+ "loss": 1.5595,
+ "step": 7
+ },
+ {
+ "epoch": 0.008002250632990529,
+ "grad_norm": 1.3860406875610352,
+ "learning_rate": 0.00019999648379900338,
+ "loss": 1.7264,
+ "step": 8
+ },
+ {
+ "epoch": 0.009002531962114344,
+ "grad_norm": 1.0861930847167969,
+ "learning_rate": 0.0001999955498150411,
+ "loss": 2.0533,
+ "step": 9
+ },
+ {
+ "epoch": 0.010002813291238161,
+ "grad_norm": 2.233243703842163,
+ "learning_rate": 0.00019999450595405374,
+ "loss": 1.9378,
+ "step": 10
+ },
+ {
+ "epoch": 0.011003094620361977,
+ "grad_norm": 1.302808165550232,
+ "learning_rate": 0.0001999933522171883,
+ "loss": 1.9182,
+ "step": 11
+ },
+ {
+ "epoch": 0.012003375949485792,
+ "grad_norm": 0.8285257816314697,
+ "learning_rate": 0.00019999208860571255,
+ "loss": 1.9146,
+ "step": 12
+ },
+ {
+ "epoch": 0.01300365727860961,
+ "grad_norm": 1.2248319387435913,
+ "learning_rate": 0.00019999071512101496,
+ "loss": 1.7467,
+ "step": 13
+ },
+ {
+ "epoch": 0.014003938607733425,
+ "grad_norm": 0.8307135105133057,
+ "learning_rate": 0.00019998923176460474,
+ "loss": 1.6896,
+ "step": 14
+ },
+ {
+ "epoch": 0.01500421993685724,
+ "grad_norm": 1.1531301736831665,
+ "learning_rate": 0.00019998763853811184,
+ "loss": 1.7549,
+ "step": 15
+ },
+ {
+ "epoch": 0.016004501265981057,
+ "grad_norm": 1.0071958303451538,
+ "learning_rate": 0.00019998593544328692,
+ "loss": 1.903,
+ "step": 16
+ },
+ {
+ "epoch": 0.017004782595104875,
+ "grad_norm": 0.9111937284469604,
+ "learning_rate": 0.00019998412248200138,
+ "loss": 1.8372,
+ "step": 17
+ },
+ {
+ "epoch": 0.01800506392422869,
+ "grad_norm": 0.9943836331367493,
+ "learning_rate": 0.00019998219965624734,
+ "loss": 1.7304,
+ "step": 18
+ },
+ {
+ "epoch": 0.019005345253352506,
+ "grad_norm": 0.8139007687568665,
+ "learning_rate": 0.0001999801669681376,
+ "loss": 1.6932,
+ "step": 19
+ },
+ {
+ "epoch": 0.020005626582476323,
+ "grad_norm": 0.7991273999214172,
+ "learning_rate": 0.00019997802441990573,
+ "loss": 1.9596,
+ "step": 20
+ },
+ {
+ "epoch": 0.021005907911600136,
+ "grad_norm": 0.832266628742218,
+ "learning_rate": 0.00019997577201390606,
+ "loss": 1.7116,
+ "step": 21
+ },
+ {
+ "epoch": 0.022006189240723954,
+ "grad_norm": 0.8465655446052551,
+ "learning_rate": 0.00019997340975261353,
+ "loss": 1.7711,
+ "step": 22
+ },
+ {
+ "epoch": 0.02300647056984777,
+ "grad_norm": 1.032426118850708,
+ "learning_rate": 0.00019997093763862383,
+ "loss": 1.6746,
+ "step": 23
+ },
+ {
+ "epoch": 0.024006751898971584,
+ "grad_norm": 1.0036743879318237,
+ "learning_rate": 0.0001999683556746534,
+ "loss": 1.7274,
+ "step": 24
+ },
+ {
+ "epoch": 0.0250070332280954,
+ "grad_norm": 0.9491412043571472,
+ "learning_rate": 0.0001999656638635393,
+ "loss": 2.0302,
+ "step": 25
+ },
+ {
+ "epoch": 0.02600731455721922,
+ "grad_norm": 0.9477822184562683,
+ "learning_rate": 0.0001999628622082394,
+ "loss": 1.6107,
+ "step": 26
+ },
+ {
+ "epoch": 0.027007595886343033,
+ "grad_norm": 1.0687041282653809,
+ "learning_rate": 0.0001999599507118322,
+ "loss": 1.8225,
+ "step": 27
+ },
+ {
+ "epoch": 0.02800787721546685,
+ "grad_norm": 1.6572712659835815,
+ "learning_rate": 0.00019995692937751683,
+ "loss": 1.896,
+ "step": 28
+ },
+ {
+ "epoch": 0.029008158544590667,
+ "grad_norm": 1.013258695602417,
+ "learning_rate": 0.0001999537982086133,
+ "loss": 1.7847,
+ "step": 29
+ },
+ {
+ "epoch": 0.03000843987371448,
+ "grad_norm": 0.7584932446479797,
+ "learning_rate": 0.00019995055720856218,
+ "loss": 1.5841,
+ "step": 30
+ },
+ {
+ "epoch": 0.031008721202838298,
+ "grad_norm": 1.1543537378311157,
+ "learning_rate": 0.00019994720638092468,
+ "loss": 1.8362,
+ "step": 31
+ },
+ {
+ "epoch": 0.032009002531962115,
+ "grad_norm": 0.8389608860015869,
+ "learning_rate": 0.00019994374572938277,
+ "loss": 1.7913,
+ "step": 32
+ },
+ {
+ "epoch": 0.03300928386108593,
+ "grad_norm": 0.7582125663757324,
+ "learning_rate": 0.00019994017525773913,
+ "loss": 1.5406,
+ "step": 33
+ },
+ {
+ "epoch": 0.03400956519020975,
+ "grad_norm": 0.7866935133934021,
+ "learning_rate": 0.00019993649496991705,
+ "loss": 1.5363,
+ "step": 34
+ },
+ {
+ "epoch": 0.03500984651933356,
+ "grad_norm": 0.8007768988609314,
+ "learning_rate": 0.00019993270486996046,
+ "loss": 1.7597,
+ "step": 35
+ },
+ {
+ "epoch": 0.03601012784845738,
+ "grad_norm": 0.8109031319618225,
+ "learning_rate": 0.000199928804962034,
+ "loss": 1.5554,
+ "step": 36
+ },
+ {
+ "epoch": 0.037010409177581194,
+ "grad_norm": 0.7722628116607666,
+ "learning_rate": 0.00019992479525042303,
+ "loss": 1.6437,
+ "step": 37
+ },
+ {
+ "epoch": 0.03801069050670501,
+ "grad_norm": 0.7336480021476746,
+ "learning_rate": 0.00019992067573953342,
+ "loss": 1.7276,
+ "step": 38
+ },
+ {
+ "epoch": 0.03901097183582883,
+ "grad_norm": 0.6940280795097351,
+ "learning_rate": 0.0001999164464338918,
+ "loss": 1.846,
+ "step": 39
+ },
+ {
+ "epoch": 0.040011253164952645,
+ "grad_norm": 0.7079702615737915,
+ "learning_rate": 0.0001999121073381454,
+ "loss": 1.7017,
+ "step": 40
+ },
+ {
+ "epoch": 0.041011534494076456,
+ "grad_norm": 0.7438498139381409,
+ "learning_rate": 0.0001999076584570621,
+ "loss": 1.665,
+ "step": 41
+ },
+ {
+ "epoch": 0.04201181582320027,
+ "grad_norm": 0.6951525211334229,
+ "learning_rate": 0.00019990309979553045,
+ "loss": 1.588,
+ "step": 42
+ },
+ {
+ "epoch": 0.04301209715232409,
+ "grad_norm": 0.9398604035377502,
+ "learning_rate": 0.00019989843135855958,
+ "loss": 1.6513,
+ "step": 43
+ },
+ {
+ "epoch": 0.04401237848144791,
+ "grad_norm": 0.7384347319602966,
+ "learning_rate": 0.00019989365315127922,
+ "loss": 1.5975,
+ "step": 44
+ },
+ {
+ "epoch": 0.045012659810571724,
+ "grad_norm": 0.9856846332550049,
+ "learning_rate": 0.0001998887651789398,
+ "loss": 1.644,
+ "step": 45
+ },
+ {
+ "epoch": 0.04601294113969554,
+ "grad_norm": 0.7322820425033569,
+ "learning_rate": 0.0001998837674469123,
+ "loss": 1.5207,
+ "step": 46
+ },
+ {
+ "epoch": 0.04701322246881936,
+ "grad_norm": 0.8695257902145386,
+ "learning_rate": 0.00019987865996068833,
+ "loss": 1.5572,
+ "step": 47
+ },
+ {
+ "epoch": 0.04801350379794317,
+ "grad_norm": 0.7231017351150513,
+ "learning_rate": 0.00019987344272588006,
+ "loss": 1.5841,
+ "step": 48
+ },
+ {
+ "epoch": 0.049013785127066986,
+ "grad_norm": 0.7147384285926819,
+ "learning_rate": 0.00019986811574822033,
+ "loss": 1.8628,
+ "step": 49
+ },
+ {
+ "epoch": 0.0500140664561908,
+ "grad_norm": 0.8631477355957031,
+ "learning_rate": 0.00019986267903356254,
+ "loss": 1.8487,
+ "step": 50
+ },
+ {
+ "epoch": 0.05101434778531462,
+ "grad_norm": 0.7995486855506897,
+ "learning_rate": 0.0001998571325878806,
+ "loss": 1.6491,
+ "step": 51
+ },
+ {
+ "epoch": 0.05201462911443844,
+ "grad_norm": 0.7828657031059265,
+ "learning_rate": 0.0001998514764172691,
+ "loss": 1.7496,
+ "step": 52
+ },
+ {
+ "epoch": 0.053014910443562255,
+ "grad_norm": 0.7789833545684814,
+ "learning_rate": 0.00019984571052794313,
+ "loss": 1.6628,
+ "step": 53
+ },
+ {
+ "epoch": 0.054015191772686065,
+ "grad_norm": 0.7077661752700806,
+ "learning_rate": 0.00019983983492623833,
+ "loss": 1.771,
+ "step": 54
+ },
+ {
+ "epoch": 0.05501547310180988,
+ "grad_norm": 0.7939582467079163,
+ "learning_rate": 0.00019983384961861096,
+ "loss": 1.707,
+ "step": 55
+ },
+ {
+ "epoch": 0.0560157544309337,
+ "grad_norm": 0.9438828229904175,
+ "learning_rate": 0.0001998277546116378,
+ "loss": 1.8334,
+ "step": 56
+ },
+ {
+ "epoch": 0.05701603576005752,
+ "grad_norm": 0.8028286695480347,
+ "learning_rate": 0.00019982154991201608,
+ "loss": 1.9117,
+ "step": 57
+ },
+ {
+ "epoch": 0.058016317089181334,
+ "grad_norm": 0.6563037037849426,
+ "learning_rate": 0.00019981523552656377,
+ "loss": 1.4767,
+ "step": 58
+ },
+ {
+ "epoch": 0.05901659841830515,
+ "grad_norm": 0.6600964665412903,
+ "learning_rate": 0.00019980881146221914,
+ "loss": 1.6656,
+ "step": 59
+ },
+ {
+ "epoch": 0.06001687974742896,
+ "grad_norm": 0.7966578602790833,
+ "learning_rate": 0.00019980227772604112,
+ "loss": 1.4844,
+ "step": 60
+ },
+ {
+ "epoch": 0.06101716107655278,
+ "grad_norm": 0.8019976615905762,
+ "learning_rate": 0.0001997956343252091,
+ "loss": 1.5682,
+ "step": 61
+ },
+ {
+ "epoch": 0.062017442405676595,
+ "grad_norm": 0.8935349583625793,
+ "learning_rate": 0.00019978888126702296,
+ "loss": 1.8131,
+ "step": 62
+ },
+ {
+ "epoch": 0.06301772373480041,
+ "grad_norm": 0.8085179924964905,
+ "learning_rate": 0.00019978201855890308,
+ "loss": 1.5602,
+ "step": 63
+ },
+ {
+ "epoch": 0.06401800506392423,
+ "grad_norm": 0.7631951570510864,
+ "learning_rate": 0.00019977504620839035,
+ "loss": 1.8008,
+ "step": 64
+ },
+ {
+ "epoch": 0.06501828639304805,
+ "grad_norm": 0.7315165996551514,
+ "learning_rate": 0.00019976796422314615,
+ "loss": 1.5735,
+ "step": 65
+ },
+ {
+ "epoch": 0.06601856772217186,
+ "grad_norm": 0.745726466178894,
+ "learning_rate": 0.00019976077261095226,
+ "loss": 1.5775,
+ "step": 66
+ },
+ {
+ "epoch": 0.06701884905129568,
+ "grad_norm": 0.9082249999046326,
+ "learning_rate": 0.00019975347137971098,
+ "loss": 1.7427,
+ "step": 67
+ },
+ {
+ "epoch": 0.0680191303804195,
+ "grad_norm": 0.6575669050216675,
+ "learning_rate": 0.00019974606053744503,
+ "loss": 1.5231,
+ "step": 68
+ },
+ {
+ "epoch": 0.06901941170954332,
+ "grad_norm": 0.7749233245849609,
+ "learning_rate": 0.00019973854009229763,
+ "loss": 1.5703,
+ "step": 69
+ },
+ {
+ "epoch": 0.07001969303866712,
+ "grad_norm": 0.7240824699401855,
+ "learning_rate": 0.00019973091005253232,
+ "loss": 1.5197,
+ "step": 70
+ },
+ {
+ "epoch": 0.07101997436779094,
+ "grad_norm": 0.8683856725692749,
+ "learning_rate": 0.0001997231704265332,
+ "loss": 1.6183,
+ "step": 71
+ },
+ {
+ "epoch": 0.07202025569691475,
+ "grad_norm": 0.6885640621185303,
+ "learning_rate": 0.00019971532122280464,
+ "loss": 1.6565,
+ "step": 72
+ },
+ {
+ "epoch": 0.07302053702603857,
+ "grad_norm": 0.6648329496383667,
+ "learning_rate": 0.0001997073624499716,
+ "loss": 1.5943,
+ "step": 73
+ },
+ {
+ "epoch": 0.07402081835516239,
+ "grad_norm": 0.8867416977882385,
+ "learning_rate": 0.0001996992941167792,
+ "loss": 1.7855,
+ "step": 74
+ },
+ {
+ "epoch": 0.0750210996842862,
+ "grad_norm": 0.7790491580963135,
+ "learning_rate": 0.00019969111623209323,
+ "loss": 1.6723,
+ "step": 75
+ },
+ {
+ "epoch": 0.07602138101341002,
+ "grad_norm": 0.7999201416969299,
+ "learning_rate": 0.00019968282880489957,
+ "loss": 1.5619,
+ "step": 76
+ },
+ {
+ "epoch": 0.07702166234253384,
+ "grad_norm": 0.6316407322883606,
+ "learning_rate": 0.00019967443184430467,
+ "loss": 1.6377,
+ "step": 77
+ },
+ {
+ "epoch": 0.07802194367165766,
+ "grad_norm": 0.7680445313453674,
+ "learning_rate": 0.0001996659253595353,
+ "loss": 1.5433,
+ "step": 78
+ },
+ {
+ "epoch": 0.07902222500078147,
+ "grad_norm": 0.7158446907997131,
+ "learning_rate": 0.0001996573093599385,
+ "loss": 1.5436,
+ "step": 79
+ },
+ {
+ "epoch": 0.08002250632990529,
+ "grad_norm": 0.7354825139045715,
+ "learning_rate": 0.00019964858385498172,
+ "loss": 1.6512,
+ "step": 80
+ },
+ {
+ "epoch": 0.08102278765902911,
+ "grad_norm": 0.7031210660934448,
+ "learning_rate": 0.00019963974885425266,
+ "loss": 1.6411,
+ "step": 81
+ },
+ {
+ "epoch": 0.08202306898815291,
+ "grad_norm": 0.8451045751571655,
+ "learning_rate": 0.00019963080436745945,
+ "loss": 1.6622,
+ "step": 82
+ },
+ {
+ "epoch": 0.08302335031727673,
+ "grad_norm": 0.8329521417617798,
+ "learning_rate": 0.00019962175040443044,
+ "loss": 1.9269,
+ "step": 83
+ },
+ {
+ "epoch": 0.08402363164640055,
+ "grad_norm": 0.6967645883560181,
+ "learning_rate": 0.0001996125869751143,
+ "loss": 1.7243,
+ "step": 84
+ },
+ {
+ "epoch": 0.08502391297552436,
+ "grad_norm": 0.8699042797088623,
+ "learning_rate": 0.00019960331408957997,
+ "loss": 1.7211,
+ "step": 85
+ },
+ {
+ "epoch": 0.08602419430464818,
+ "grad_norm": 0.6780512928962708,
+ "learning_rate": 0.00019959393175801671,
+ "loss": 1.6376,
+ "step": 86
+ },
+ {
+ "epoch": 0.087024475633772,
+ "grad_norm": 0.7213720679283142,
+ "learning_rate": 0.00019958443999073397,
+ "loss": 1.6048,
+ "step": 87
+ },
+ {
+ "epoch": 0.08802475696289581,
+ "grad_norm": 0.6077585816383362,
+ "learning_rate": 0.00019957483879816151,
+ "loss": 1.5231,
+ "step": 88
+ },
+ {
+ "epoch": 0.08902503829201963,
+ "grad_norm": 0.6854611039161682,
+ "learning_rate": 0.00019956512819084928,
+ "loss": 1.4726,
+ "step": 89
+ },
+ {
+ "epoch": 0.09002531962114345,
+ "grad_norm": 0.6969390511512756,
+ "learning_rate": 0.00019955530817946748,
+ "loss": 1.6435,
+ "step": 90
+ },
+ {
+ "epoch": 0.09102560095026727,
+ "grad_norm": 0.7178792953491211,
+ "learning_rate": 0.00019954537877480655,
+ "loss": 1.6967,
+ "step": 91
+ },
+ {
+ "epoch": 0.09202588227939108,
+ "grad_norm": 0.8248458504676819,
+ "learning_rate": 0.00019953533998777706,
+ "loss": 1.5884,
+ "step": 92
+ },
+ {
+ "epoch": 0.0930261636085149,
+ "grad_norm": 0.6472075581550598,
+ "learning_rate": 0.00019952519182940993,
+ "loss": 1.5074,
+ "step": 93
+ },
+ {
+ "epoch": 0.09402644493763872,
+ "grad_norm": 0.7548672556877136,
+ "learning_rate": 0.00019951493431085603,
+ "loss": 1.6934,
+ "step": 94
+ },
+ {
+ "epoch": 0.09502672626676252,
+ "grad_norm": 0.6680666208267212,
+ "learning_rate": 0.00019950456744338658,
+ "loss": 1.4014,
+ "step": 95
+ },
+ {
+ "epoch": 0.09602700759588634,
+ "grad_norm": 0.7270862460136414,
+ "learning_rate": 0.00019949409123839288,
+ "loss": 1.6991,
+ "step": 96
+ },
+ {
+ "epoch": 0.09702728892501016,
+ "grad_norm": 0.682833731174469,
+ "learning_rate": 0.00019948350570738642,
+ "loss": 1.4926,
+ "step": 97
+ },
+ {
+ "epoch": 0.09802757025413397,
+ "grad_norm": 0.6598315238952637,
+ "learning_rate": 0.0001994728108619987,
+ "loss": 1.6329,
+ "step": 98
+ },
+ {
+ "epoch": 0.09902785158325779,
+ "grad_norm": 0.830845832824707,
+ "learning_rate": 0.0001994620067139815,
+ "loss": 1.8517,
+ "step": 99
+ },
+ {
+ "epoch": 0.1000281329123816,
+ "grad_norm": 0.6964694857597351,
+ "learning_rate": 0.00019945109327520658,
+ "loss": 1.5459,
+ "step": 100
+ },
+ {
+ "epoch": 0.10102841424150542,
+ "grad_norm": 0.8640177249908447,
+ "learning_rate": 0.00019944007055766586,
+ "loss": 1.6638,
+ "step": 101
+ },
+ {
+ "epoch": 0.10202869557062924,
+ "grad_norm": 0.6485210657119751,
+ "learning_rate": 0.00019942893857347128,
+ "loss": 1.8025,
+ "step": 102
+ },
+ {
+ "epoch": 0.10302897689975306,
+ "grad_norm": 0.6746248006820679,
+ "learning_rate": 0.00019941769733485494,
+ "loss": 1.6954,
+ "step": 103
+ },
+ {
+ "epoch": 0.10402925822887688,
+ "grad_norm": 0.7386549115180969,
+ "learning_rate": 0.00019940634685416888,
+ "loss": 1.4547,
+ "step": 104
+ },
+ {
+ "epoch": 0.10502953955800069,
+ "grad_norm": 0.7518633008003235,
+ "learning_rate": 0.00019939488714388524,
+ "loss": 1.5098,
+ "step": 105
+ },
+ {
+ "epoch": 0.10602982088712451,
+ "grad_norm": 0.7350422739982605,
+ "learning_rate": 0.00019938331821659614,
+ "loss": 1.5452,
+ "step": 106
+ },
+ {
+ "epoch": 0.10703010221624833,
+ "grad_norm": 0.6544668674468994,
+ "learning_rate": 0.0001993716400850138,
+ "loss": 1.5106,
+ "step": 107
+ },
+ {
+ "epoch": 0.10803038354537213,
+ "grad_norm": 0.6108564138412476,
+ "learning_rate": 0.0001993598527619703,
+ "loss": 1.5818,
+ "step": 108
+ },
+ {
+ "epoch": 0.10903066487449595,
+ "grad_norm": 0.731071949005127,
+ "learning_rate": 0.00019934795626041783,
+ "loss": 1.4819,
+ "step": 109
+ },
+ {
+ "epoch": 0.11003094620361976,
+ "grad_norm": 0.5978986620903015,
+ "learning_rate": 0.0001993359505934285,
+ "loss": 1.5469,
+ "step": 110
+ },
+ {
+ "epoch": 0.11103122753274358,
+ "grad_norm": 0.7249881029129028,
+ "learning_rate": 0.00019932383577419432,
+ "loss": 1.7466,
+ "step": 111
+ },
+ {
+ "epoch": 0.1120315088618674,
+ "grad_norm": 0.6161806583404541,
+ "learning_rate": 0.0001993116118160273,
+ "loss": 1.3411,
+ "step": 112
+ },
+ {
+ "epoch": 0.11303179019099122,
+ "grad_norm": 0.6745229363441467,
+ "learning_rate": 0.00019929927873235938,
+ "loss": 1.5615,
+ "step": 113
+ },
+ {
+ "epoch": 0.11403207152011503,
+ "grad_norm": 0.6489872336387634,
+ "learning_rate": 0.00019928683653674237,
+ "loss": 1.6279,
+ "step": 114
+ },
+ {
+ "epoch": 0.11503235284923885,
+ "grad_norm": 0.7769975662231445,
+ "learning_rate": 0.00019927428524284805,
+ "loss": 1.6155,
+ "step": 115
+ },
+ {
+ "epoch": 0.11603263417836267,
+ "grad_norm": 0.734336793422699,
+ "learning_rate": 0.00019926162486446792,
+ "loss": 1.6837,
+ "step": 116
+ },
+ {
+ "epoch": 0.11703291550748648,
+ "grad_norm": 0.6966903805732727,
+ "learning_rate": 0.0001992488554155135,
+ "loss": 1.6659,
+ "step": 117
+ },
+ {
+ "epoch": 0.1180331968366103,
+ "grad_norm": 0.6714586615562439,
+ "learning_rate": 0.00019923597691001615,
+ "loss": 1.5161,
+ "step": 118
+ },
+ {
+ "epoch": 0.11903347816573412,
+ "grad_norm": 0.6390894651412964,
+ "learning_rate": 0.0001992229893621269,
+ "loss": 1.4561,
+ "step": 119
+ },
+ {
+ "epoch": 0.12003375949485792,
+ "grad_norm": 0.6481143832206726,
+ "learning_rate": 0.00019920989278611687,
+ "loss": 1.6331,
+ "step": 120
+ },
+ {
+ "epoch": 0.12103404082398174,
+ "grad_norm": 0.6819384694099426,
+ "learning_rate": 0.0001991966871963767,
+ "loss": 1.6508,
+ "step": 121
+ },
+ {
+ "epoch": 0.12203432215310556,
+ "grad_norm": 0.6839059591293335,
+ "learning_rate": 0.000199183372607417,
+ "loss": 1.6514,
+ "step": 122
+ },
+ {
+ "epoch": 0.12303460348222937,
+ "grad_norm": 0.6401050090789795,
+ "learning_rate": 0.0001991699490338681,
+ "loss": 1.8065,
+ "step": 123
+ },
+ {
+ "epoch": 0.12403488481135319,
+ "grad_norm": 0.6860588788986206,
+ "learning_rate": 0.00019915641649048005,
+ "loss": 1.7658,
+ "step": 124
+ },
+ {
+ "epoch": 0.12503516614047702,
+ "grad_norm": 0.6286434531211853,
+ "learning_rate": 0.0001991427749921227,
+ "loss": 1.7678,
+ "step": 125
+ },
+ {
+ "epoch": 0.12603544746960083,
+ "grad_norm": 0.6609922647476196,
+ "learning_rate": 0.00019912902455378556,
+ "loss": 1.4934,
+ "step": 126
+ },
+ {
+ "epoch": 0.12703572879872463,
+ "grad_norm": 0.7058399319648743,
+ "learning_rate": 0.00019911516519057788,
+ "loss": 1.6058,
+ "step": 127
+ },
+ {
+ "epoch": 0.12803601012784846,
+ "grad_norm": 0.6362051963806152,
+ "learning_rate": 0.00019910119691772863,
+ "loss": 1.502,
+ "step": 128
+ },
+ {
+ "epoch": 0.12903629145697226,
+ "grad_norm": 0.7493100762367249,
+ "learning_rate": 0.00019908711975058637,
+ "loss": 1.5287,
+ "step": 129
+ },
+ {
+ "epoch": 0.1300365727860961,
+ "grad_norm": 0.6492393612861633,
+ "learning_rate": 0.0001990729337046194,
+ "loss": 1.5716,
+ "step": 130
+ },
+ {
+ "epoch": 0.1310368541152199,
+ "grad_norm": 0.70331871509552,
+ "learning_rate": 0.0001990586387954156,
+ "loss": 1.5882,
+ "step": 131
+ },
+ {
+ "epoch": 0.13203713544434373,
+ "grad_norm": 0.7581572532653809,
+ "learning_rate": 0.00019904423503868247,
+ "loss": 1.7627,
+ "step": 132
+ },
+ {
+ "epoch": 0.13303741677346753,
+ "grad_norm": 0.7087228894233704,
+ "learning_rate": 0.00019902972245024715,
+ "loss": 1.6257,
+ "step": 133
+ },
+ {
+ "epoch": 0.13403769810259136,
+ "grad_norm": 0.7920627593994141,
+ "learning_rate": 0.00019901510104605637,
+ "loss": 1.572,
+ "step": 134
+ },
+ {
+ "epoch": 0.13503797943171517,
+ "grad_norm": 0.6869202256202698,
+ "learning_rate": 0.00019900037084217637,
+ "loss": 1.5478,
+ "step": 135
+ },
+ {
+ "epoch": 0.136038260760839,
+ "grad_norm": 0.6879409551620483,
+ "learning_rate": 0.00019898553185479303,
+ "loss": 1.3104,
+ "step": 136
+ },
+ {
+ "epoch": 0.1370385420899628,
+ "grad_norm": 0.6574143767356873,
+ "learning_rate": 0.00019897058410021167,
+ "loss": 1.7041,
+ "step": 137
+ },
+ {
+ "epoch": 0.13803882341908663,
+ "grad_norm": 0.7793259620666504,
+ "learning_rate": 0.00019895552759485722,
+ "loss": 1.5417,
+ "step": 138
+ },
+ {
+ "epoch": 0.13903910474821043,
+ "grad_norm": 0.6310438513755798,
+ "learning_rate": 0.00019894036235527395,
+ "loss": 1.4978,
+ "step": 139
+ },
+ {
+ "epoch": 0.14003938607733424,
+ "grad_norm": 0.6298012137413025,
+ "learning_rate": 0.00019892508839812584,
+ "loss": 1.5367,
+ "step": 140
+ },
+ {
+ "epoch": 0.14103966740645807,
+ "grad_norm": 0.5647856593132019,
+ "learning_rate": 0.00019890970574019617,
+ "loss": 1.537,
+ "step": 141
+ },
+ {
+ "epoch": 0.14203994873558187,
+ "grad_norm": 0.6491876244544983,
+ "learning_rate": 0.00019889421439838763,
+ "loss": 1.6992,
+ "step": 142
+ },
+ {
+ "epoch": 0.1430402300647057,
+ "grad_norm": 0.6574720144271851,
+ "learning_rate": 0.00019887861438972246,
+ "loss": 1.3837,
+ "step": 143
+ },
+ {
+ "epoch": 0.1440405113938295,
+ "grad_norm": 0.6267092227935791,
+ "learning_rate": 0.00019886290573134228,
+ "loss": 1.6307,
+ "step": 144
+ },
+ {
+ "epoch": 0.14504079272295334,
+ "grad_norm": 0.6785029172897339,
+ "learning_rate": 0.000198847088440508,
+ "loss": 1.574,
+ "step": 145
+ },
+ {
+ "epoch": 0.14604107405207714,
+ "grad_norm": 0.6218644380569458,
+ "learning_rate": 0.0001988311625346,
+ "loss": 1.4676,
+ "step": 146
+ },
+ {
+ "epoch": 0.14704135538120097,
+ "grad_norm": 0.6047986745834351,
+ "learning_rate": 0.00019881512803111796,
+ "loss": 1.4316,
+ "step": 147
+ },
+ {
+ "epoch": 0.14804163671032478,
+ "grad_norm": 0.7340937256813049,
+ "learning_rate": 0.00019879898494768093,
+ "loss": 1.5185,
+ "step": 148
+ },
+ {
+ "epoch": 0.1490419180394486,
+ "grad_norm": 0.5874620676040649,
+ "learning_rate": 0.00019878273330202717,
+ "loss": 1.5031,
+ "step": 149
+ },
+ {
+ "epoch": 0.1500421993685724,
+ "grad_norm": 0.6943556666374207,
+ "learning_rate": 0.00019876637311201433,
+ "loss": 1.7323,
+ "step": 150
+ },
+ {
+ "epoch": 0.15104248069769624,
+ "grad_norm": 0.6345832347869873,
+ "learning_rate": 0.00019874990439561934,
+ "loss": 1.4691,
+ "step": 151
+ },
+ {
+ "epoch": 0.15204276202682004,
+ "grad_norm": 0.7047753930091858,
+ "learning_rate": 0.0001987333271709383,
+ "loss": 1.5198,
+ "step": 152
+ },
+ {
+ "epoch": 0.15304304335594385,
+ "grad_norm": 0.6043322086334229,
+ "learning_rate": 0.00019871664145618657,
+ "loss": 1.5488,
+ "step": 153
+ },
+ {
+ "epoch": 0.15404332468506768,
+ "grad_norm": 0.5978446006774902,
+ "learning_rate": 0.00019869984726969878,
+ "loss": 1.4278,
+ "step": 154
+ },
+ {
+ "epoch": 0.15504360601419148,
+ "grad_norm": 0.6796436905860901,
+ "learning_rate": 0.00019868294462992866,
+ "loss": 1.5845,
+ "step": 155
+ },
+ {
+ "epoch": 0.1560438873433153,
+ "grad_norm": 0.7113372087478638,
+ "learning_rate": 0.00019866593355544922,
+ "loss": 1.7509,
+ "step": 156
+ },
+ {
+ "epoch": 0.15704416867243912,
+ "grad_norm": 0.5908107757568359,
+ "learning_rate": 0.00019864881406495246,
+ "loss": 1.5693,
+ "step": 157
+ },
+ {
+ "epoch": 0.15804445000156295,
+ "grad_norm": 0.7135252952575684,
+ "learning_rate": 0.00019863158617724967,
+ "loss": 1.6109,
+ "step": 158
+ },
+ {
+ "epoch": 0.15904473133068675,
+ "grad_norm": 0.5621710419654846,
+ "learning_rate": 0.00019861424991127115,
+ "loss": 1.5368,
+ "step": 159
+ },
+ {
+ "epoch": 0.16004501265981058,
+ "grad_norm": 0.6205443143844604,
+ "learning_rate": 0.00019859680528606637,
+ "loss": 1.5181,
+ "step": 160
+ },
+ {
+ "epoch": 0.16104529398893438,
+ "grad_norm": 0.6933260560035706,
+ "learning_rate": 0.00019857925232080373,
+ "loss": 1.4508,
+ "step": 161
+ },
+ {
+ "epoch": 0.16204557531805822,
+ "grad_norm": 0.6911661028862,
+ "learning_rate": 0.00019856159103477086,
+ "loss": 1.5423,
+ "step": 162
+ },
+ {
+ "epoch": 0.16304585664718202,
+ "grad_norm": 0.7684744000434875,
+ "learning_rate": 0.00019854382144737426,
+ "loss": 1.4097,
+ "step": 163
+ },
+ {
+ "epoch": 0.16404613797630582,
+ "grad_norm": 0.6657288074493408,
+ "learning_rate": 0.00019852594357813952,
+ "loss": 1.6145,
+ "step": 164
+ },
+ {
+ "epoch": 0.16504641930542965,
+ "grad_norm": 0.7030160427093506,
+ "learning_rate": 0.00019850795744671116,
+ "loss": 1.6551,
+ "step": 165
+ },
+ {
+ "epoch": 0.16604670063455346,
+ "grad_norm": 0.87894207239151,
+ "learning_rate": 0.0001984898630728527,
+ "loss": 1.6316,
+ "step": 166
+ },
+ {
+ "epoch": 0.1670469819636773,
+ "grad_norm": 0.6282681226730347,
+ "learning_rate": 0.0001984716604764466,
+ "loss": 1.451,
+ "step": 167
+ },
+ {
+ "epoch": 0.1680472632928011,
+ "grad_norm": 0.6729792952537537,
+ "learning_rate": 0.0001984533496774942,
+ "loss": 1.4381,
+ "step": 168
+ },
+ {
+ "epoch": 0.16904754462192492,
+ "grad_norm": 0.7300116419792175,
+ "learning_rate": 0.0001984349306961158,
+ "loss": 1.4244,
+ "step": 169
+ },
+ {
+ "epoch": 0.17004782595104873,
+ "grad_norm": 0.6853480935096741,
+ "learning_rate": 0.00019841640355255043,
+ "loss": 1.6174,
+ "step": 170
+ },
+ {
+ "epoch": 0.17104810728017256,
+ "grad_norm": 0.735612690448761,
+ "learning_rate": 0.00019839776826715614,
+ "loss": 1.5085,
+ "step": 171
+ },
+ {
+ "epoch": 0.17204838860929636,
+ "grad_norm": 0.6735563278198242,
+ "learning_rate": 0.00019837902486040978,
+ "loss": 1.507,
+ "step": 172
+ },
+ {
+ "epoch": 0.1730486699384202,
+ "grad_norm": 0.6617917418479919,
+ "learning_rate": 0.0001983601733529069,
+ "loss": 1.6774,
+ "step": 173
+ },
+ {
+ "epoch": 0.174048951267544,
+ "grad_norm": 0.7137823700904846,
+ "learning_rate": 0.00019834121376536187,
+ "loss": 1.4665,
+ "step": 174
+ },
+ {
+ "epoch": 0.17504923259666783,
+ "grad_norm": 0.6372626423835754,
+ "learning_rate": 0.00019832214611860793,
+ "loss": 1.3597,
+ "step": 175
+ },
+ {
+ "epoch": 0.17604951392579163,
+ "grad_norm": 0.7131632566452026,
+ "learning_rate": 0.00019830297043359692,
+ "loss": 1.4833,
+ "step": 176
+ },
+ {
+ "epoch": 0.17704979525491543,
+ "grad_norm": 0.7538559436798096,
+ "learning_rate": 0.00019828368673139947,
+ "loss": 1.4714,
+ "step": 177
+ },
+ {
+ "epoch": 0.17805007658403926,
+ "grad_norm": 0.5684806108474731,
+ "learning_rate": 0.0001982642950332049,
+ "loss": 1.5012,
+ "step": 178
+ },
+ {
+ "epoch": 0.17905035791316307,
+ "grad_norm": 0.621658444404602,
+ "learning_rate": 0.00019824479536032112,
+ "loss": 1.9119,
+ "step": 179
+ },
+ {
+ "epoch": 0.1800506392422869,
+ "grad_norm": 0.6564679741859436,
+ "learning_rate": 0.0001982251877341748,
+ "loss": 1.5131,
+ "step": 180
+ },
+ {
+ "epoch": 0.1810509205714107,
+ "grad_norm": 0.6546526551246643,
+ "learning_rate": 0.00019820547217631117,
+ "loss": 1.4493,
+ "step": 181
+ },
+ {
+ "epoch": 0.18205120190053453,
+ "grad_norm": 0.6504479050636292,
+ "learning_rate": 0.00019818564870839405,
+ "loss": 1.6131,
+ "step": 182
+ },
+ {
+ "epoch": 0.18305148322965833,
+ "grad_norm": 0.6269803047180176,
+ "learning_rate": 0.00019816571735220583,
+ "loss": 1.5936,
+ "step": 183
+ },
+ {
+ "epoch": 0.18405176455878217,
+ "grad_norm": 0.6303942799568176,
+ "learning_rate": 0.00019814567812964748,
+ "loss": 1.6948,
+ "step": 184
+ },
+ {
+ "epoch": 0.18505204588790597,
+ "grad_norm": 0.6562885046005249,
+ "learning_rate": 0.00019812553106273847,
+ "loss": 1.5542,
+ "step": 185
+ },
+ {
+ "epoch": 0.1860523272170298,
+ "grad_norm": 0.5844212174415588,
+ "learning_rate": 0.00019810527617361681,
+ "loss": 1.539,
+ "step": 186
+ },
+ {
+ "epoch": 0.1870526085461536,
+ "grad_norm": 0.6402295231819153,
+ "learning_rate": 0.00019808491348453894,
+ "loss": 1.4748,
+ "step": 187
+ },
+ {
+ "epoch": 0.18805288987527743,
+ "grad_norm": 0.6579477190971375,
+ "learning_rate": 0.00019806444301787978,
+ "loss": 1.5114,
+ "step": 188
+ },
+ {
+ "epoch": 0.18905317120440124,
+ "grad_norm": 0.6511597037315369,
+ "learning_rate": 0.0001980438647961327,
+ "loss": 1.4678,
+ "step": 189
+ },
+ {
+ "epoch": 0.19005345253352504,
+ "grad_norm": 0.6911427974700928,
+ "learning_rate": 0.00019802317884190935,
+ "loss": 1.6876,
+ "step": 190
+ },
+ {
+ "epoch": 0.19105373386264887,
+ "grad_norm": 0.6146433353424072,
+ "learning_rate": 0.00019800238517793996,
+ "loss": 1.5986,
+ "step": 191
+ },
+ {
+ "epoch": 0.19205401519177268,
+ "grad_norm": 0.6126302480697632,
+ "learning_rate": 0.00019798148382707296,
+ "loss": 1.571,
+ "step": 192
+ },
+ {
+ "epoch": 0.1930542965208965,
+ "grad_norm": 0.5751072764396667,
+ "learning_rate": 0.00019796047481227515,
+ "loss": 1.4921,
+ "step": 193
+ },
+ {
+ "epoch": 0.1940545778500203,
+ "grad_norm": 0.6484839916229248,
+ "learning_rate": 0.00019793935815663163,
+ "loss": 1.7495,
+ "step": 194
+ },
+ {
+ "epoch": 0.19505485917914414,
+ "grad_norm": 0.6875973343849182,
+ "learning_rate": 0.00019791813388334581,
+ "loss": 1.5782,
+ "step": 195
+ },
+ {
+ "epoch": 0.19605514050826794,
+ "grad_norm": 0.8130943179130554,
+ "learning_rate": 0.00019789680201573933,
+ "loss": 1.4964,
+ "step": 196
+ },
+ {
+ "epoch": 0.19705542183739178,
+ "grad_norm": 0.6734403371810913,
+ "learning_rate": 0.00019787536257725202,
+ "loss": 1.4787,
+ "step": 197
+ },
+ {
+ "epoch": 0.19805570316651558,
+ "grad_norm": 0.6480582356452942,
+ "learning_rate": 0.00019785381559144196,
+ "loss": 1.5629,
+ "step": 198
+ },
+ {
+ "epoch": 0.1990559844956394,
+ "grad_norm": 0.6554624438285828,
+ "learning_rate": 0.00019783216108198542,
+ "loss": 1.5806,
+ "step": 199
+ },
+ {
+ "epoch": 0.2000562658247632,
+ "grad_norm": 0.705443263053894,
+ "learning_rate": 0.00019781039907267677,
+ "loss": 1.8372,
+ "step": 200
+ },
+ {
+ "epoch": 0.20105654715388704,
+ "grad_norm": 0.706923246383667,
+ "learning_rate": 0.00019778852958742853,
+ "loss": 1.6405,
+ "step": 201
+ },
+ {
+ "epoch": 0.20205682848301085,
+ "grad_norm": 0.7062544822692871,
+ "learning_rate": 0.00019776655265027127,
+ "loss": 1.6,
+ "step": 202
+ },
+ {
+ "epoch": 0.20305710981213465,
+ "grad_norm": 0.7227569222450256,
+ "learning_rate": 0.00019774446828535371,
+ "loss": 1.5172,
+ "step": 203
+ },
+ {
+ "epoch": 0.20405739114125848,
+ "grad_norm": 0.6762563586235046,
+ "learning_rate": 0.00019772227651694256,
+ "loss": 1.6753,
+ "step": 204
+ },
+ {
+ "epoch": 0.20505767247038229,
+ "grad_norm": 0.6048421859741211,
+ "learning_rate": 0.00019769997736942258,
+ "loss": 1.4827,
+ "step": 205
+ },
+ {
+ "epoch": 0.20605795379950612,
+ "grad_norm": 0.6002956032752991,
+ "learning_rate": 0.00019767757086729647,
+ "loss": 1.5438,
+ "step": 206
+ },
+ {
+ "epoch": 0.20705823512862992,
+ "grad_norm": 0.7948954701423645,
+ "learning_rate": 0.00019765505703518496,
+ "loss": 1.4988,
+ "step": 207
+ },
+ {
+ "epoch": 0.20805851645775375,
+ "grad_norm": 0.6495680809020996,
+ "learning_rate": 0.00019763243589782662,
+ "loss": 1.5738,
+ "step": 208
+ },
+ {
+ "epoch": 0.20905879778687755,
+ "grad_norm": 0.6413107514381409,
+ "learning_rate": 0.00019760970748007803,
+ "loss": 1.3794,
+ "step": 209
+ },
+ {
+ "epoch": 0.21005907911600138,
+ "grad_norm": 0.5999665260314941,
+ "learning_rate": 0.0001975868718069136,
+ "loss": 1.4313,
+ "step": 210
+ },
+ {
+ "epoch": 0.2110593604451252,
+ "grad_norm": 0.6355773210525513,
+ "learning_rate": 0.00019756392890342563,
+ "loss": 1.5107,
+ "step": 211
+ },
+ {
+ "epoch": 0.21205964177424902,
+ "grad_norm": 0.6068251729011536,
+ "learning_rate": 0.00019754087879482422,
+ "loss": 1.536,
+ "step": 212
+ },
+ {
+ "epoch": 0.21305992310337282,
+ "grad_norm": 0.5568909049034119,
+ "learning_rate": 0.00019751772150643722,
+ "loss": 1.5372,
+ "step": 213
+ },
+ {
+ "epoch": 0.21406020443249665,
+ "grad_norm": 0.5771281719207764,
+ "learning_rate": 0.00019749445706371038,
+ "loss": 1.487,
+ "step": 214
+ },
+ {
+ "epoch": 0.21506048576162046,
+ "grad_norm": 0.6146671772003174,
+ "learning_rate": 0.00019747108549220702,
+ "loss": 1.4585,
+ "step": 215
+ },
+ {
+ "epoch": 0.21606076709074426,
+ "grad_norm": 0.5595754981040955,
+ "learning_rate": 0.00019744760681760832,
+ "loss": 1.4224,
+ "step": 216
+ },
+ {
+ "epoch": 0.2170610484198681,
+ "grad_norm": 0.5873929858207703,
+ "learning_rate": 0.00019742402106571314,
+ "loss": 1.4581,
+ "step": 217
+ },
+ {
+ "epoch": 0.2180613297489919,
+ "grad_norm": 0.5725668668746948,
+ "learning_rate": 0.00019740032826243788,
+ "loss": 1.4393,
+ "step": 218
+ },
+ {
+ "epoch": 0.21906161107811573,
+ "grad_norm": 0.6452648043632507,
+ "learning_rate": 0.0001973765284338167,
+ "loss": 1.6048,
+ "step": 219
+ },
+ {
+ "epoch": 0.22006189240723953,
+ "grad_norm": 0.6166092753410339,
+ "learning_rate": 0.00019735262160600127,
+ "loss": 1.4976,
+ "step": 220
+ },
+ {
+ "epoch": 0.22106217373636336,
+ "grad_norm": 0.7053269147872925,
+ "learning_rate": 0.00019732860780526088,
+ "loss": 1.6882,
+ "step": 221
+ },
+ {
+ "epoch": 0.22206245506548716,
+ "grad_norm": 0.7072796821594238,
+ "learning_rate": 0.00019730448705798239,
+ "loss": 1.5441,
+ "step": 222
+ },
+ {
+ "epoch": 0.223062736394611,
+ "grad_norm": 0.6704496145248413,
+ "learning_rate": 0.00019728025939067008,
+ "loss": 1.3791,
+ "step": 223
+ },
+ {
+ "epoch": 0.2240630177237348,
+ "grad_norm": 0.6141743659973145,
+ "learning_rate": 0.00019725592482994583,
+ "loss": 1.5831,
+ "step": 224
+ },
+ {
+ "epoch": 0.22506329905285863,
+ "grad_norm": 0.6235673427581787,
+ "learning_rate": 0.00019723148340254892,
+ "loss": 1.6103,
+ "step": 225
+ },
+ {
+ "epoch": 0.22606358038198243,
+ "grad_norm": 0.6383673548698425,
+ "learning_rate": 0.00019720693513533598,
+ "loss": 1.6284,
+ "step": 226
+ },
+ {
+ "epoch": 0.22706386171110624,
+ "grad_norm": 0.7666104435920715,
+ "learning_rate": 0.00019718228005528122,
+ "loss": 1.702,
+ "step": 227
+ },
+ {
+ "epoch": 0.22806414304023007,
+ "grad_norm": 0.6431383490562439,
+ "learning_rate": 0.00019715751818947603,
+ "loss": 1.4571,
+ "step": 228
+ },
+ {
+ "epoch": 0.22906442436935387,
+ "grad_norm": 0.6177626252174377,
+ "learning_rate": 0.0001971326495651293,
+ "loss": 1.4326,
+ "step": 229
+ },
+ {
+ "epoch": 0.2300647056984777,
+ "grad_norm": 0.7352898120880127,
+ "learning_rate": 0.00019710767420956705,
+ "loss": 1.7427,
+ "step": 230
+ },
+ {
+ "epoch": 0.2310649870276015,
+ "grad_norm": 0.6259469389915466,
+ "learning_rate": 0.0001970825921502328,
+ "loss": 1.634,
+ "step": 231
+ },
+ {
+ "epoch": 0.23206526835672533,
+ "grad_norm": 0.6699635982513428,
+ "learning_rate": 0.0001970574034146871,
+ "loss": 1.4705,
+ "step": 232
+ },
+ {
+ "epoch": 0.23306554968584914,
+ "grad_norm": 0.5577033162117004,
+ "learning_rate": 0.00019703210803060782,
+ "loss": 1.5438,
+ "step": 233
+ },
+ {
+ "epoch": 0.23406583101497297,
+ "grad_norm": 0.6063429117202759,
+ "learning_rate": 0.00019700670602579008,
+ "loss": 1.555,
+ "step": 234
+ },
+ {
+ "epoch": 0.23506611234409677,
+ "grad_norm": 0.6069104671478271,
+ "learning_rate": 0.00019698119742814606,
+ "loss": 1.5036,
+ "step": 235
+ },
+ {
+ "epoch": 0.2360663936732206,
+ "grad_norm": 0.6158379316329956,
+ "learning_rate": 0.00019695558226570507,
+ "loss": 1.3741,
+ "step": 236
+ },
+ {
+ "epoch": 0.2370666750023444,
+ "grad_norm": 0.6366294622421265,
+ "learning_rate": 0.00019692986056661356,
+ "loss": 1.4467,
+ "step": 237
+ },
+ {
+ "epoch": 0.23806695633146824,
+ "grad_norm": 0.6726595163345337,
+ "learning_rate": 0.00019690403235913504,
+ "loss": 1.3861,
+ "step": 238
+ },
+ {
+ "epoch": 0.23906723766059204,
+ "grad_norm": 0.6546512842178345,
+ "learning_rate": 0.00019687809767165,
+ "loss": 1.6886,
+ "step": 239
+ },
+ {
+ "epoch": 0.24006751898971584,
+ "grad_norm": 0.6623121500015259,
+ "learning_rate": 0.000196852056532656,
+ "loss": 1.5925,
+ "step": 240
+ },
+ {
+ "epoch": 0.24106780031883968,
+ "grad_norm": 0.6577529311180115,
+ "learning_rate": 0.00019682590897076752,
+ "loss": 1.4509,
+ "step": 241
+ },
+ {
+ "epoch": 0.24206808164796348,
+ "grad_norm": 0.5586327314376831,
+ "learning_rate": 0.00019679965501471608,
+ "loss": 1.6346,
+ "step": 242
+ },
+ {
+ "epoch": 0.2430683629770873,
+ "grad_norm": 0.6459937691688538,
+ "learning_rate": 0.0001967732946933499,
+ "loss": 1.4129,
+ "step": 243
+ },
+ {
+ "epoch": 0.2440686443062111,
+ "grad_norm": 0.778732180595398,
+ "learning_rate": 0.00019674682803563428,
+ "loss": 1.5129,
+ "step": 244
+ },
+ {
+ "epoch": 0.24506892563533494,
+ "grad_norm": 0.7264451384544373,
+ "learning_rate": 0.00019672025507065131,
+ "loss": 1.4483,
+ "step": 245
+ },
+ {
+ "epoch": 0.24606920696445875,
+ "grad_norm": 0.616084635257721,
+ "learning_rate": 0.00019669357582759983,
+ "loss": 1.5947,
+ "step": 246
+ },
+ {
+ "epoch": 0.24706948829358258,
+ "grad_norm": 0.5911642909049988,
+ "learning_rate": 0.00019666679033579552,
+ "loss": 1.6407,
+ "step": 247
+ },
+ {
+ "epoch": 0.24806976962270638,
+ "grad_norm": 0.6102796792984009,
+ "learning_rate": 0.00019663989862467082,
+ "loss": 1.5251,
+ "step": 248
+ },
+ {
+ "epoch": 0.2490700509518302,
+ "grad_norm": 0.5973434448242188,
+ "learning_rate": 0.00019661290072377482,
+ "loss": 1.3969,
+ "step": 249
+ },
+ {
+ "epoch": 0.25007033228095404,
+ "grad_norm": 0.8515523076057434,
+ "learning_rate": 0.00019658579666277334,
+ "loss": 1.5687,
+ "step": 250
+ },
+ {
+ "epoch": 0.2510706136100778,
+ "grad_norm": 0.5003417134284973,
+ "learning_rate": 0.0001965585864714488,
+ "loss": 1.4102,
+ "step": 251
+ },
+ {
+ "epoch": 0.25207089493920165,
+ "grad_norm": 0.5215190052986145,
+ "learning_rate": 0.00019653127017970034,
+ "loss": 1.2471,
+ "step": 252
+ },
+ {
+ "epoch": 0.2530711762683255,
+ "grad_norm": 0.6491619348526001,
+ "learning_rate": 0.0001965038478175436,
+ "loss": 1.6969,
+ "step": 253
+ },
+ {
+ "epoch": 0.25407145759744926,
+ "grad_norm": 0.6176133155822754,
+ "learning_rate": 0.00019647631941511082,
+ "loss": 1.5351,
+ "step": 254
+ },
+ {
+ "epoch": 0.2550717389265731,
+ "grad_norm": 0.6913408041000366,
+ "learning_rate": 0.0001964486850026507,
+ "loss": 1.4309,
+ "step": 255
+ },
+ {
+ "epoch": 0.2560720202556969,
+ "grad_norm": 0.5875718593597412,
+ "learning_rate": 0.00019642094461052852,
+ "loss": 1.4679,
+ "step": 256
+ },
+ {
+ "epoch": 0.25707230158482075,
+ "grad_norm": 0.6682264804840088,
+ "learning_rate": 0.00019639309826922585,
+ "loss": 1.5393,
+ "step": 257
+ },
+ {
+ "epoch": 0.2580725829139445,
+ "grad_norm": 0.7241432666778564,
+ "learning_rate": 0.0001963651460093409,
+ "loss": 1.4998,
+ "step": 258
+ },
+ {
+ "epoch": 0.25907286424306836,
+ "grad_norm": 0.5210353136062622,
+ "learning_rate": 0.00019633708786158806,
+ "loss": 1.3837,
+ "step": 259
+ },
+ {
+ "epoch": 0.2600731455721922,
+ "grad_norm": 0.584020733833313,
+ "learning_rate": 0.00019630892385679818,
+ "loss": 1.4961,
+ "step": 260
+ },
+ {
+ "epoch": 0.261073426901316,
+ "grad_norm": 0.6708115935325623,
+ "learning_rate": 0.00019628065402591845,
+ "loss": 1.5277,
+ "step": 261
+ },
+ {
+ "epoch": 0.2620737082304398,
+ "grad_norm": 0.5480003952980042,
+ "learning_rate": 0.00019625227840001225,
+ "loss": 1.556,
+ "step": 262
+ },
+ {
+ "epoch": 0.2630739895595636,
+ "grad_norm": 0.595191478729248,
+ "learning_rate": 0.0001962237970102593,
+ "loss": 1.3514,
+ "step": 263
+ },
+ {
+ "epoch": 0.26407427088868746,
+ "grad_norm": 0.7332099080085754,
+ "learning_rate": 0.0001961952098879555,
+ "loss": 1.5394,
+ "step": 264
+ },
+ {
+ "epoch": 0.26507455221781123,
+ "grad_norm": 0.596319317817688,
+ "learning_rate": 0.00019616651706451287,
+ "loss": 1.3828,
+ "step": 265
+ },
+ {
+ "epoch": 0.26607483354693506,
+ "grad_norm": 0.5998026132583618,
+ "learning_rate": 0.0001961377185714597,
+ "loss": 1.4479,
+ "step": 266
+ },
+ {
+ "epoch": 0.2670751148760589,
+ "grad_norm": 0.6220220923423767,
+ "learning_rate": 0.0001961088144404403,
+ "loss": 1.5121,
+ "step": 267
+ },
+ {
+ "epoch": 0.2680753962051827,
+ "grad_norm": 0.5865943431854248,
+ "learning_rate": 0.00019607980470321505,
+ "loss": 1.6747,
+ "step": 268
+ },
+ {
+ "epoch": 0.2690756775343065,
+ "grad_norm": 0.5790852904319763,
+ "learning_rate": 0.00019605068939166045,
+ "loss": 1.3798,
+ "step": 269
+ },
+ {
+ "epoch": 0.27007595886343033,
+ "grad_norm": 0.6157498955726624,
+ "learning_rate": 0.00019602146853776894,
+ "loss": 1.6799,
+ "step": 270
+ },
+ {
+ "epoch": 0.27107624019255416,
+ "grad_norm": 0.6214422583580017,
+ "learning_rate": 0.000195992142173649,
+ "loss": 1.4782,
+ "step": 271
+ },
+ {
+ "epoch": 0.272076521521678,
+ "grad_norm": 0.6460129618644714,
+ "learning_rate": 0.0001959627103315249,
+ "loss": 1.4874,
+ "step": 272
+ },
+ {
+ "epoch": 0.27307680285080177,
+ "grad_norm": 0.5928930640220642,
+ "learning_rate": 0.00019593317304373705,
+ "loss": 1.4557,
+ "step": 273
+ },
+ {
+ "epoch": 0.2740770841799256,
+ "grad_norm": 0.5123687982559204,
+ "learning_rate": 0.00019590353034274144,
+ "loss": 1.445,
+ "step": 274
+ },
+ {
+ "epoch": 0.27507736550904943,
+ "grad_norm": 0.607455313205719,
+ "learning_rate": 0.00019587378226111014,
+ "loss": 1.4468,
+ "step": 275
+ },
+ {
+ "epoch": 0.27607764683817326,
+ "grad_norm": 0.6108120083808899,
+ "learning_rate": 0.00019584392883153088,
+ "loss": 1.3834,
+ "step": 276
+ },
+ {
+ "epoch": 0.27707792816729704,
+ "grad_norm": 0.680404543876648,
+ "learning_rate": 0.00019581397008680717,
+ "loss": 1.5094,
+ "step": 277
+ },
+ {
+ "epoch": 0.27807820949642087,
+ "grad_norm": 0.6419563889503479,
+ "learning_rate": 0.00019578390605985826,
+ "loss": 1.6933,
+ "step": 278
+ },
+ {
+ "epoch": 0.2790784908255447,
+ "grad_norm": 0.5788853764533997,
+ "learning_rate": 0.00019575373678371909,
+ "loss": 1.4754,
+ "step": 279
+ },
+ {
+ "epoch": 0.2800787721546685,
+ "grad_norm": 0.5943770408630371,
+ "learning_rate": 0.00019572346229154025,
+ "loss": 1.2949,
+ "step": 280
+ },
+ {
+ "epoch": 0.2810790534837923,
+ "grad_norm": 0.5997135043144226,
+ "learning_rate": 0.00019569308261658787,
+ "loss": 1.5365,
+ "step": 281
+ },
+ {
+ "epoch": 0.28207933481291614,
+ "grad_norm": 0.692401647567749,
+ "learning_rate": 0.00019566259779224378,
+ "loss": 1.4946,
+ "step": 282
+ },
+ {
+ "epoch": 0.28307961614203997,
+ "grad_norm": 0.5856708884239197,
+ "learning_rate": 0.00019563200785200526,
+ "loss": 1.426,
+ "step": 283
+ },
+ {
+ "epoch": 0.28407989747116374,
+ "grad_norm": 1.2516822814941406,
+ "learning_rate": 0.00019560131282948516,
+ "loss": 1.5119,
+ "step": 284
+ },
+ {
+ "epoch": 0.2850801788002876,
+ "grad_norm": 0.6360501050949097,
+ "learning_rate": 0.0001955705127584117,
+ "loss": 1.3916,
+ "step": 285
+ },
+ {
+ "epoch": 0.2860804601294114,
+ "grad_norm": 0.6822036504745483,
+ "learning_rate": 0.00019553960767262863,
+ "loss": 1.5565,
+ "step": 286
+ },
+ {
+ "epoch": 0.28708074145853524,
+ "grad_norm": 0.6973714828491211,
+ "learning_rate": 0.00019550859760609503,
+ "loss": 1.5559,
+ "step": 287
+ },
+ {
+ "epoch": 0.288081022787659,
+ "grad_norm": 0.6595618724822998,
+ "learning_rate": 0.00019547748259288536,
+ "loss": 1.5824,
+ "step": 288
+ },
+ {
+ "epoch": 0.28908130411678284,
+ "grad_norm": 0.5625808238983154,
+ "learning_rate": 0.0001954462626671894,
+ "loss": 1.2669,
+ "step": 289
+ },
+ {
+ "epoch": 0.2900815854459067,
+ "grad_norm": 0.6318663358688354,
+ "learning_rate": 0.0001954149378633122,
+ "loss": 1.3896,
+ "step": 290
+ },
+ {
+ "epoch": 0.29108186677503045,
+ "grad_norm": 0.6655906438827515,
+ "learning_rate": 0.00019538350821567404,
+ "loss": 1.3889,
+ "step": 291
+ },
+ {
+ "epoch": 0.2920821481041543,
+ "grad_norm": 0.5947337746620178,
+ "learning_rate": 0.00019535197375881045,
+ "loss": 1.6112,
+ "step": 292
+ },
+ {
+ "epoch": 0.2930824294332781,
+ "grad_norm": 0.6139295101165771,
+ "learning_rate": 0.00019532033452737205,
+ "loss": 1.5185,
+ "step": 293
+ },
+ {
+ "epoch": 0.29408271076240194,
+ "grad_norm": 0.579953670501709,
+ "learning_rate": 0.00019528859055612468,
+ "loss": 1.3874,
+ "step": 294
+ },
+ {
+ "epoch": 0.2950829920915257,
+ "grad_norm": 0.6101506352424622,
+ "learning_rate": 0.0001952567418799492,
+ "loss": 1.5965,
+ "step": 295
+ },
+ {
+ "epoch": 0.29608327342064955,
+ "grad_norm": 0.6393965482711792,
+ "learning_rate": 0.00019522478853384155,
+ "loss": 1.4124,
+ "step": 296
+ },
+ {
+ "epoch": 0.2970835547497734,
+ "grad_norm": 0.6147856712341309,
+ "learning_rate": 0.00019519273055291266,
+ "loss": 1.3776,
+ "step": 297
+ },
+ {
+ "epoch": 0.2980838360788972,
+ "grad_norm": 0.6056416630744934,
+ "learning_rate": 0.00019516056797238846,
+ "loss": 1.4453,
+ "step": 298
+ },
+ {
+ "epoch": 0.299084117408021,
+ "grad_norm": 0.6705831289291382,
+ "learning_rate": 0.00019512830082760987,
+ "loss": 1.3248,
+ "step": 299
+ },
+ {
+ "epoch": 0.3000843987371448,
+ "grad_norm": 0.6664314866065979,
+ "learning_rate": 0.00019509592915403255,
+ "loss": 1.5865,
+ "step": 300
+ },
+ {
+ "epoch": 0.30108468006626865,
+ "grad_norm": 0.5325604677200317,
+ "learning_rate": 0.00019506345298722717,
+ "loss": 1.0646,
+ "step": 301
+ },
+ {
+ "epoch": 0.3020849613953925,
+ "grad_norm": 0.589242160320282,
+ "learning_rate": 0.00019503087236287913,
+ "loss": 1.2297,
+ "step": 302
+ },
+ {
+ "epoch": 0.30308524272451626,
+ "grad_norm": 0.5677699446678162,
+ "learning_rate": 0.00019499818731678873,
+ "loss": 1.3961,
+ "step": 303
+ },
+ {
+ "epoch": 0.3040855240536401,
+ "grad_norm": 0.5676394701004028,
+ "learning_rate": 0.00019496539788487082,
+ "loss": 1.3276,
+ "step": 304
+ },
+ {
+ "epoch": 0.3050858053827639,
+ "grad_norm": 0.7280861139297485,
+ "learning_rate": 0.0001949325041031551,
+ "loss": 1.6731,
+ "step": 305
+ },
+ {
+ "epoch": 0.3060860867118877,
+ "grad_norm": 0.690636396408081,
+ "learning_rate": 0.0001948995060077859,
+ "loss": 1.5443,
+ "step": 306
+ },
+ {
+ "epoch": 0.3070863680410115,
+ "grad_norm": 0.611426055431366,
+ "learning_rate": 0.0001948664036350221,
+ "loss": 1.5827,
+ "step": 307
+ },
+ {
+ "epoch": 0.30808664937013536,
+ "grad_norm": 0.7112497091293335,
+ "learning_rate": 0.00019483319702123732,
+ "loss": 1.5401,
+ "step": 308
+ },
+ {
+ "epoch": 0.3090869306992592,
+ "grad_norm": 0.6598275303840637,
+ "learning_rate": 0.00019479988620291956,
+ "loss": 1.6432,
+ "step": 309
+ },
+ {
+ "epoch": 0.31008721202838296,
+ "grad_norm": 0.5019932985305786,
+ "learning_rate": 0.00019476647121667137,
+ "loss": 1.2561,
+ "step": 310
+ },
+ {
+ "epoch": 0.3110874933575068,
+ "grad_norm": 0.7777897715568542,
+ "learning_rate": 0.00019473295209920983,
+ "loss": 1.6118,
+ "step": 311
+ },
+ {
+ "epoch": 0.3120877746866306,
+ "grad_norm": 0.6028640866279602,
+ "learning_rate": 0.00019469932888736632,
+ "loss": 1.4682,
+ "step": 312
+ },
+ {
+ "epoch": 0.31308805601575446,
+ "grad_norm": 0.554381251335144,
+ "learning_rate": 0.00019466560161808674,
+ "loss": 1.4179,
+ "step": 313
+ },
+ {
+ "epoch": 0.31408833734487823,
+ "grad_norm": 0.6212736368179321,
+ "learning_rate": 0.00019463177032843124,
+ "loss": 1.4327,
+ "step": 314
+ },
+ {
+ "epoch": 0.31508861867400206,
+ "grad_norm": 0.6829814910888672,
+ "learning_rate": 0.00019459783505557424,
+ "loss": 1.4455,
+ "step": 315
+ },
+ {
+ "epoch": 0.3160889000031259,
+ "grad_norm": 0.5808065533638,
+ "learning_rate": 0.00019456379583680452,
+ "loss": 1.3583,
+ "step": 316
+ },
+ {
+ "epoch": 0.31708918133224967,
+ "grad_norm": 0.6354159712791443,
+ "learning_rate": 0.000194529652709525,
+ "loss": 1.6916,
+ "step": 317
+ },
+ {
+ "epoch": 0.3180894626613735,
+ "grad_norm": 0.6299159526824951,
+ "learning_rate": 0.00019449540571125286,
+ "loss": 1.47,
+ "step": 318
+ },
+ {
+ "epoch": 0.31908974399049733,
+ "grad_norm": 0.6222877502441406,
+ "learning_rate": 0.00019446105487961926,
+ "loss": 1.4137,
+ "step": 319
+ },
+ {
+ "epoch": 0.32009002531962116,
+ "grad_norm": 0.5995916724205017,
+ "learning_rate": 0.0001944266002523696,
+ "loss": 1.3679,
+ "step": 320
+ },
+ {
+ "epoch": 0.32109030664874494,
+ "grad_norm": 0.599814236164093,
+ "learning_rate": 0.0001943920418673633,
+ "loss": 1.4075,
+ "step": 321
+ },
+ {
+ "epoch": 0.32209058797786877,
+ "grad_norm": 0.5409269332885742,
+ "learning_rate": 0.00019435737976257377,
+ "loss": 1.4289,
+ "step": 322
+ },
+ {
+ "epoch": 0.3230908693069926,
+ "grad_norm": 0.5298951864242554,
+ "learning_rate": 0.00019432261397608834,
+ "loss": 1.2834,
+ "step": 323
+ },
+ {
+ "epoch": 0.32409115063611643,
+ "grad_norm": 0.7196112871170044,
+ "learning_rate": 0.00019428774454610843,
+ "loss": 1.4845,
+ "step": 324
+ },
+ {
+ "epoch": 0.3250914319652402,
+ "grad_norm": 0.5605450868606567,
+ "learning_rate": 0.00019425277151094913,
+ "loss": 1.4575,
+ "step": 325
+ },
+ {
+ "epoch": 0.32609171329436404,
+ "grad_norm": 0.573080837726593,
+ "learning_rate": 0.00019421769490903957,
+ "loss": 1.5757,
+ "step": 326
+ },
+ {
+ "epoch": 0.32709199462348787,
+ "grad_norm": 0.5017902851104736,
+ "learning_rate": 0.0001941825147789225,
+ "loss": 1.5794,
+ "step": 327
+ },
+ {
+ "epoch": 0.32809227595261165,
+ "grad_norm": 0.643267810344696,
+ "learning_rate": 0.00019414723115925456,
+ "loss": 1.4903,
+ "step": 328
+ },
+ {
+ "epoch": 0.3290925572817355,
+ "grad_norm": 0.6522070169448853,
+ "learning_rate": 0.0001941118440888061,
+ "loss": 1.5907,
+ "step": 329
+ },
+ {
+ "epoch": 0.3300928386108593,
+ "grad_norm": 0.6496105790138245,
+ "learning_rate": 0.0001940763536064611,
+ "loss": 1.4225,
+ "step": 330
+ },
+ {
+ "epoch": 0.33109311993998314,
+ "grad_norm": 0.6011468768119812,
+ "learning_rate": 0.00019404075975121716,
+ "loss": 1.5022,
+ "step": 331
+ },
+ {
+ "epoch": 0.3320934012691069,
+ "grad_norm": 0.6327878832817078,
+ "learning_rate": 0.0001940050625621855,
+ "loss": 1.468,
+ "step": 332
+ },
+ {
+ "epoch": 0.33309368259823074,
+ "grad_norm": 0.6187490820884705,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 1.5183,
+ "step": 333
+ },
+ {
+ "epoch": 0.3340939639273546,
+ "grad_norm": 0.7625093460083008,
+ "learning_rate": 0.0001939333583397715,
+ "loss": 1.4813,
+ "step": 334
+ },
+ {
+ "epoch": 0.3350942452564784,
+ "grad_norm": 0.5286359190940857,
+ "learning_rate": 0.00019389735138517915,
+ "loss": 1.3674,
+ "step": 335
+ },
+ {
+ "epoch": 0.3360945265856022,
+ "grad_norm": 0.5798503160476685,
+ "learning_rate": 0.00019386124125437895,
+ "loss": 1.3016,
+ "step": 336
+ },
+ {
+ "epoch": 0.337094807914726,
+ "grad_norm": 0.48794126510620117,
+ "learning_rate": 0.00019382502798704935,
+ "loss": 1.3642,
+ "step": 337
+ },
+ {
+ "epoch": 0.33809508924384984,
+ "grad_norm": 0.7394312620162964,
+ "learning_rate": 0.00019378871162298227,
+ "loss": 1.327,
+ "step": 338
+ },
+ {
+ "epoch": 0.3390953705729737,
+ "grad_norm": 0.5598319172859192,
+ "learning_rate": 0.00019375229220208276,
+ "loss": 1.4247,
+ "step": 339
+ },
+ {
+ "epoch": 0.34009565190209745,
+ "grad_norm": 0.6099628806114197,
+ "learning_rate": 0.00019371576976436917,
+ "loss": 1.4906,
+ "step": 340
+ },
+ {
+ "epoch": 0.3410959332312213,
+ "grad_norm": 0.6749781370162964,
+ "learning_rate": 0.00019367914434997312,
+ "loss": 1.367,
+ "step": 341
+ },
+ {
+ "epoch": 0.3420962145603451,
+ "grad_norm": 0.7721238136291504,
+ "learning_rate": 0.00019364241599913924,
+ "loss": 1.4464,
+ "step": 342
+ },
+ {
+ "epoch": 0.3430964958894689,
+ "grad_norm": 0.5762369632720947,
+ "learning_rate": 0.0001936055847522254,
+ "loss": 1.409,
+ "step": 343
+ },
+ {
+ "epoch": 0.3440967772185927,
+ "grad_norm": 0.6960498690605164,
+ "learning_rate": 0.00019356865064970244,
+ "loss": 1.3907,
+ "step": 344
+ },
+ {
+ "epoch": 0.34509705854771655,
+ "grad_norm": 0.5805984735488892,
+ "learning_rate": 0.0001935316137321543,
+ "loss": 1.4539,
+ "step": 345
+ },
+ {
+ "epoch": 0.3460973398768404,
+ "grad_norm": 0.5686045289039612,
+ "learning_rate": 0.00019349447404027782,
+ "loss": 1.4493,
+ "step": 346
+ },
+ {
+ "epoch": 0.34709762120596416,
+ "grad_norm": 0.5448501706123352,
+ "learning_rate": 0.00019345723161488283,
+ "loss": 1.5633,
+ "step": 347
+ },
+ {
+ "epoch": 0.348097902535088,
+ "grad_norm": 0.6388784050941467,
+ "learning_rate": 0.000193419886496892,
+ "loss": 1.7179,
+ "step": 348
+ },
+ {
+ "epoch": 0.3490981838642118,
+ "grad_norm": 0.5240457653999329,
+ "learning_rate": 0.00019338243872734086,
+ "loss": 1.4411,
+ "step": 349
+ },
+ {
+ "epoch": 0.35009846519333565,
+ "grad_norm": 0.5460641384124756,
+ "learning_rate": 0.00019334488834737775,
+ "loss": 1.361,
+ "step": 350
+ },
+ {
+ "epoch": 0.3510987465224594,
+ "grad_norm": 0.5495695471763611,
+ "learning_rate": 0.00019330723539826375,
+ "loss": 1.5891,
+ "step": 351
+ },
+ {
+ "epoch": 0.35209902785158326,
+ "grad_norm": 0.5618153214454651,
+ "learning_rate": 0.00019326947992137262,
+ "loss": 1.3084,
+ "step": 352
+ },
+ {
+ "epoch": 0.3530993091807071,
+ "grad_norm": 0.5603707432746887,
+ "learning_rate": 0.00019323162195819082,
+ "loss": 1.5732,
+ "step": 353
+ },
+ {
+ "epoch": 0.35409959050983086,
+ "grad_norm": 0.5732563138008118,
+ "learning_rate": 0.0001931936615503174,
+ "loss": 1.5045,
+ "step": 354
+ },
+ {
+ "epoch": 0.3550998718389547,
+ "grad_norm": 0.5997583866119385,
+ "learning_rate": 0.000193155598739464,
+ "loss": 1.4175,
+ "step": 355
+ },
+ {
+ "epoch": 0.3561001531680785,
+ "grad_norm": 0.5769765377044678,
+ "learning_rate": 0.0001931174335674547,
+ "loss": 1.4834,
+ "step": 356
+ },
+ {
+ "epoch": 0.35710043449720236,
+ "grad_norm": 0.5902683138847351,
+ "learning_rate": 0.0001930791660762262,
+ "loss": 1.4664,
+ "step": 357
+ },
+ {
+ "epoch": 0.35810071582632613,
+ "grad_norm": 0.6354758143424988,
+ "learning_rate": 0.00019304079630782752,
+ "loss": 1.3891,
+ "step": 358
+ },
+ {
+ "epoch": 0.35910099715544996,
+ "grad_norm": 0.6018317341804504,
+ "learning_rate": 0.0001930023243044201,
+ "loss": 1.4514,
+ "step": 359
+ },
+ {
+ "epoch": 0.3601012784845738,
+ "grad_norm": 0.5409123301506042,
+ "learning_rate": 0.00019296375010827773,
+ "loss": 1.4708,
+ "step": 360
+ },
+ {
+ "epoch": 0.3611015598136976,
+ "grad_norm": 0.5457523465156555,
+ "learning_rate": 0.00019292507376178643,
+ "loss": 1.4988,
+ "step": 361
+ },
+ {
+ "epoch": 0.3621018411428214,
+ "grad_norm": 0.626768946647644,
+ "learning_rate": 0.00019288629530744454,
+ "loss": 1.5722,
+ "step": 362
+ },
+ {
+ "epoch": 0.36310212247194523,
+ "grad_norm": 0.566554069519043,
+ "learning_rate": 0.0001928474147878626,
+ "loss": 1.2135,
+ "step": 363
+ },
+ {
+ "epoch": 0.36410240380106906,
+ "grad_norm": 0.7327786684036255,
+ "learning_rate": 0.0001928084322457632,
+ "loss": 1.5245,
+ "step": 364
+ },
+ {
+ "epoch": 0.3651026851301929,
+ "grad_norm": 0.5205698609352112,
+ "learning_rate": 0.00019276934772398114,
+ "loss": 1.2068,
+ "step": 365
+ },
+ {
+ "epoch": 0.36610296645931667,
+ "grad_norm": 1.0956753492355347,
+ "learning_rate": 0.00019273016126546323,
+ "loss": 1.5044,
+ "step": 366
+ },
+ {
+ "epoch": 0.3671032477884405,
+ "grad_norm": 0.6484043598175049,
+ "learning_rate": 0.00019269087291326833,
+ "loss": 1.6369,
+ "step": 367
+ },
+ {
+ "epoch": 0.36810352911756433,
+ "grad_norm": 0.6363429427146912,
+ "learning_rate": 0.00019265148271056722,
+ "loss": 1.4338,
+ "step": 368
+ },
+ {
+ "epoch": 0.3691038104466881,
+ "grad_norm": 0.6295244693756104,
+ "learning_rate": 0.0001926119907006426,
+ "loss": 1.4701,
+ "step": 369
+ },
+ {
+ "epoch": 0.37010409177581194,
+ "grad_norm": 0.6013259887695312,
+ "learning_rate": 0.00019257239692688907,
+ "loss": 1.7629,
+ "step": 370
+ },
+ {
+ "epoch": 0.37110437310493577,
+ "grad_norm": 0.6949493885040283,
+ "learning_rate": 0.00019253270143281296,
+ "loss": 1.6713,
+ "step": 371
+ },
+ {
+ "epoch": 0.3721046544340596,
+ "grad_norm": 0.6933801174163818,
+ "learning_rate": 0.00019249290426203252,
+ "loss": 1.6131,
+ "step": 372
+ },
+ {
+ "epoch": 0.3731049357631834,
+ "grad_norm": 0.5847527384757996,
+ "learning_rate": 0.0001924530054582776,
+ "loss": 1.3968,
+ "step": 373
+ },
+ {
+ "epoch": 0.3741052170923072,
+ "grad_norm": 0.6053057312965393,
+ "learning_rate": 0.0001924130050653898,
+ "loss": 1.3311,
+ "step": 374
+ },
+ {
+ "epoch": 0.37510549842143104,
+ "grad_norm": 0.5513793230056763,
+ "learning_rate": 0.00019237290312732226,
+ "loss": 1.5063,
+ "step": 375
+ },
+ {
+ "epoch": 0.37610577975055487,
+ "grad_norm": 0.5859197378158569,
+ "learning_rate": 0.00019233269968813984,
+ "loss": 1.3556,
+ "step": 376
+ },
+ {
+ "epoch": 0.37710606107967864,
+ "grad_norm": 0.5623495578765869,
+ "learning_rate": 0.00019229239479201876,
+ "loss": 1.3859,
+ "step": 377
+ },
+ {
+ "epoch": 0.3781063424088025,
+ "grad_norm": 0.602118968963623,
+ "learning_rate": 0.0001922519884832469,
+ "loss": 1.334,
+ "step": 378
+ },
+ {
+ "epoch": 0.3791066237379263,
+ "grad_norm": 0.5212380886077881,
+ "learning_rate": 0.0001922114808062234,
+ "loss": 1.401,
+ "step": 379
+ },
+ {
+ "epoch": 0.3801069050670501,
+ "grad_norm": 0.4969455599784851,
+ "learning_rate": 0.00019217087180545893,
+ "loss": 1.2292,
+ "step": 380
+ },
+ {
+ "epoch": 0.3811071863961739,
+ "grad_norm": 0.578629732131958,
+ "learning_rate": 0.0001921301615255754,
+ "loss": 1.5015,
+ "step": 381
+ },
+ {
+ "epoch": 0.38210746772529774,
+ "grad_norm": 0.593053936958313,
+ "learning_rate": 0.0001920893500113061,
+ "loss": 1.302,
+ "step": 382
+ },
+ {
+ "epoch": 0.3831077490544216,
+ "grad_norm": 0.5832563638687134,
+ "learning_rate": 0.00019204843730749547,
+ "loss": 1.3695,
+ "step": 383
+ },
+ {
+ "epoch": 0.38410803038354535,
+ "grad_norm": 0.5608510375022888,
+ "learning_rate": 0.00019200742345909915,
+ "loss": 1.3792,
+ "step": 384
+ },
+ {
+ "epoch": 0.3851083117126692,
+ "grad_norm": 0.5337334275245667,
+ "learning_rate": 0.00019196630851118398,
+ "loss": 1.4163,
+ "step": 385
+ },
+ {
+ "epoch": 0.386108593041793,
+ "grad_norm": 0.5460125803947449,
+ "learning_rate": 0.0001919250925089278,
+ "loss": 1.2439,
+ "step": 386
+ },
+ {
+ "epoch": 0.38710887437091684,
+ "grad_norm": 0.6217851638793945,
+ "learning_rate": 0.00019188377549761963,
+ "loss": 1.6428,
+ "step": 387
+ },
+ {
+ "epoch": 0.3881091557000406,
+ "grad_norm": 0.7154502868652344,
+ "learning_rate": 0.00019184235752265928,
+ "loss": 1.3468,
+ "step": 388
+ },
+ {
+ "epoch": 0.38910943702916445,
+ "grad_norm": 0.5044635534286499,
+ "learning_rate": 0.00019180083862955772,
+ "loss": 1.1877,
+ "step": 389
+ },
+ {
+ "epoch": 0.3901097183582883,
+ "grad_norm": 0.5755971074104309,
+ "learning_rate": 0.00019175921886393666,
+ "loss": 1.3475,
+ "step": 390
+ },
+ {
+ "epoch": 0.39110999968741206,
+ "grad_norm": 0.6121137738227844,
+ "learning_rate": 0.00019171749827152869,
+ "loss": 1.4342,
+ "step": 391
+ },
+ {
+ "epoch": 0.3921102810165359,
+ "grad_norm": 0.5615536570549011,
+ "learning_rate": 0.0001916756768981772,
+ "loss": 1.5471,
+ "step": 392
+ },
+ {
+ "epoch": 0.3931105623456597,
+ "grad_norm": 0.6527026295661926,
+ "learning_rate": 0.00019163375478983632,
+ "loss": 1.6363,
+ "step": 393
+ },
+ {
+ "epoch": 0.39411084367478355,
+ "grad_norm": 0.6465044617652893,
+ "learning_rate": 0.00019159173199257085,
+ "loss": 1.3823,
+ "step": 394
+ },
+ {
+ "epoch": 0.3951111250039073,
+ "grad_norm": 0.5620000958442688,
+ "learning_rate": 0.00019154960855255628,
+ "loss": 1.5418,
+ "step": 395
+ },
+ {
+ "epoch": 0.39611140633303116,
+ "grad_norm": 0.7090588808059692,
+ "learning_rate": 0.0001915073845160786,
+ "loss": 1.4593,
+ "step": 396
+ },
+ {
+ "epoch": 0.397111687662155,
+ "grad_norm": 0.6644489169120789,
+ "learning_rate": 0.00019146505992953446,
+ "loss": 1.4236,
+ "step": 397
+ },
+ {
+ "epoch": 0.3981119689912788,
+ "grad_norm": 0.6038135886192322,
+ "learning_rate": 0.00019142263483943085,
+ "loss": 1.1805,
+ "step": 398
+ },
+ {
+ "epoch": 0.3991122503204026,
+ "grad_norm": 0.6746726036071777,
+ "learning_rate": 0.00019138010929238534,
+ "loss": 1.5264,
+ "step": 399
+ },
+ {
+ "epoch": 0.4001125316495264,
+ "grad_norm": 0.5871374607086182,
+ "learning_rate": 0.00019133748333512575,
+ "loss": 1.3709,
+ "step": 400
+ },
+ {
+ "epoch": 0.40111281297865026,
+ "grad_norm": 0.5743412375450134,
+ "learning_rate": 0.00019129475701449035,
+ "loss": 1.4677,
+ "step": 401
+ },
+ {
+ "epoch": 0.4021130943077741,
+ "grad_norm": 0.6184396743774414,
+ "learning_rate": 0.0001912519303774276,
+ "loss": 1.4228,
+ "step": 402
+ },
+ {
+ "epoch": 0.40311337563689786,
+ "grad_norm": 0.5872434973716736,
+ "learning_rate": 0.0001912090034709963,
+ "loss": 1.3495,
+ "step": 403
+ },
+ {
+ "epoch": 0.4041136569660217,
+ "grad_norm": 0.6500155925750732,
+ "learning_rate": 0.00019116597634236525,
+ "loss": 1.4315,
+ "step": 404
+ },
+ {
+ "epoch": 0.4051139382951455,
+ "grad_norm": 0.5240740180015564,
+ "learning_rate": 0.0001911228490388136,
+ "loss": 1.4954,
+ "step": 405
+ },
+ {
+ "epoch": 0.4061142196242693,
+ "grad_norm": 0.5531806945800781,
+ "learning_rate": 0.00019107962160773035,
+ "loss": 1.3949,
+ "step": 406
+ },
+ {
+ "epoch": 0.40711450095339313,
+ "grad_norm": 0.5266262888908386,
+ "learning_rate": 0.0001910362940966147,
+ "loss": 1.2859,
+ "step": 407
+ },
+ {
+ "epoch": 0.40811478228251696,
+ "grad_norm": 0.5734869241714478,
+ "learning_rate": 0.00019099286655307568,
+ "loss": 1.2451,
+ "step": 408
+ },
+ {
+ "epoch": 0.4091150636116408,
+ "grad_norm": 0.5922874212265015,
+ "learning_rate": 0.0001909493390248324,
+ "loss": 1.5429,
+ "step": 409
+ },
+ {
+ "epoch": 0.41011534494076457,
+ "grad_norm": 0.542540431022644,
+ "learning_rate": 0.00019090571155971366,
+ "loss": 1.4138,
+ "step": 410
+ },
+ {
+ "epoch": 0.4111156262698884,
+ "grad_norm": 0.57356196641922,
+ "learning_rate": 0.00019086198420565823,
+ "loss": 1.2592,
+ "step": 411
+ },
+ {
+ "epoch": 0.41211590759901223,
+ "grad_norm": 0.6042733192443848,
+ "learning_rate": 0.00019081815701071445,
+ "loss": 1.5524,
+ "step": 412
+ },
+ {
+ "epoch": 0.41311618892813606,
+ "grad_norm": 0.46550241112709045,
+ "learning_rate": 0.0001907742300230406,
+ "loss": 1.308,
+ "step": 413
+ },
+ {
+ "epoch": 0.41411647025725984,
+ "grad_norm": 0.6283137798309326,
+ "learning_rate": 0.00019073020329090444,
+ "loss": 1.4753,
+ "step": 414
+ },
+ {
+ "epoch": 0.41511675158638367,
+ "grad_norm": 0.5254876613616943,
+ "learning_rate": 0.0001906860768626834,
+ "loss": 1.2157,
+ "step": 415
+ },
+ {
+ "epoch": 0.4161170329155075,
+ "grad_norm": 0.59089195728302,
+ "learning_rate": 0.00019064185078686443,
+ "loss": 1.2684,
+ "step": 416
+ },
+ {
+ "epoch": 0.4171173142446313,
+ "grad_norm": 0.7129126787185669,
+ "learning_rate": 0.000190597525112044,
+ "loss": 1.3974,
+ "step": 417
+ },
+ {
+ "epoch": 0.4181175955737551,
+ "grad_norm": 0.607305109500885,
+ "learning_rate": 0.000190553099886928,
+ "loss": 1.4312,
+ "step": 418
+ },
+ {
+ "epoch": 0.41911787690287894,
+ "grad_norm": 0.49921515583992004,
+ "learning_rate": 0.00019050857516033173,
+ "loss": 1.3469,
+ "step": 419
+ },
+ {
+ "epoch": 0.42011815823200277,
+ "grad_norm": 0.6167325377464294,
+ "learning_rate": 0.00019046395098117983,
+ "loss": 1.4723,
+ "step": 420
+ },
+ {
+ "epoch": 0.42111843956112655,
+ "grad_norm": 0.6144593358039856,
+ "learning_rate": 0.00019041922739850616,
+ "loss": 1.5502,
+ "step": 421
+ },
+ {
+ "epoch": 0.4221187208902504,
+ "grad_norm": 0.61333167552948,
+ "learning_rate": 0.00019037440446145385,
+ "loss": 1.3283,
+ "step": 422
+ },
+ {
+ "epoch": 0.4231190022193742,
+ "grad_norm": 0.5881702303886414,
+ "learning_rate": 0.00019032948221927524,
+ "loss": 1.4206,
+ "step": 423
+ },
+ {
+ "epoch": 0.42411928354849804,
+ "grad_norm": 0.5334322452545166,
+ "learning_rate": 0.00019028446072133175,
+ "loss": 1.4603,
+ "step": 424
+ },
+ {
+ "epoch": 0.4251195648776218,
+ "grad_norm": 0.5730605721473694,
+ "learning_rate": 0.00019023934001709383,
+ "loss": 1.4375,
+ "step": 425
+ },
+ {
+ "epoch": 0.42611984620674564,
+ "grad_norm": 0.6227820515632629,
+ "learning_rate": 0.00019019412015614098,
+ "loss": 1.4888,
+ "step": 426
+ },
+ {
+ "epoch": 0.4271201275358695,
+ "grad_norm": 0.5811313390731812,
+ "learning_rate": 0.00019014880118816164,
+ "loss": 1.3492,
+ "step": 427
+ },
+ {
+ "epoch": 0.4281204088649933,
+ "grad_norm": 0.5685800313949585,
+ "learning_rate": 0.0001901033831629532,
+ "loss": 1.5052,
+ "step": 428
+ },
+ {
+ "epoch": 0.4291206901941171,
+ "grad_norm": 0.5961394309997559,
+ "learning_rate": 0.00019005786613042185,
+ "loss": 1.3324,
+ "step": 429
+ },
+ {
+ "epoch": 0.4301209715232409,
+ "grad_norm": 0.5845314860343933,
+ "learning_rate": 0.00019001225014058255,
+ "loss": 1.5733,
+ "step": 430
+ },
+ {
+ "epoch": 0.43112125285236474,
+ "grad_norm": 0.5400176048278809,
+ "learning_rate": 0.00018996653524355902,
+ "loss": 1.3973,
+ "step": 431
+ },
+ {
+ "epoch": 0.4321215341814885,
+ "grad_norm": 0.5462201833724976,
+ "learning_rate": 0.00018992072148958368,
+ "loss": 1.2167,
+ "step": 432
+ },
+ {
+ "epoch": 0.43312181551061235,
+ "grad_norm": 0.6200360059738159,
+ "learning_rate": 0.00018987480892899758,
+ "loss": 1.5596,
+ "step": 433
+ },
+ {
+ "epoch": 0.4341220968397362,
+ "grad_norm": 0.5230718851089478,
+ "learning_rate": 0.00018982879761225027,
+ "loss": 1.3661,
+ "step": 434
+ },
+ {
+ "epoch": 0.43512237816886,
+ "grad_norm": 0.5868643522262573,
+ "learning_rate": 0.00018978268758989991,
+ "loss": 1.4792,
+ "step": 435
+ },
+ {
+ "epoch": 0.4361226594979838,
+ "grad_norm": 0.580892026424408,
+ "learning_rate": 0.00018973647891261307,
+ "loss": 1.3275,
+ "step": 436
+ },
+ {
+ "epoch": 0.4371229408271076,
+ "grad_norm": 0.5903263688087463,
+ "learning_rate": 0.00018969017163116472,
+ "loss": 1.4721,
+ "step": 437
+ },
+ {
+ "epoch": 0.43812322215623145,
+ "grad_norm": 0.5108968019485474,
+ "learning_rate": 0.0001896437657964382,
+ "loss": 1.3785,
+ "step": 438
+ },
+ {
+ "epoch": 0.4391235034853553,
+ "grad_norm": 0.6707500219345093,
+ "learning_rate": 0.00018959726145942508,
+ "loss": 1.5033,
+ "step": 439
+ },
+ {
+ "epoch": 0.44012378481447906,
+ "grad_norm": 0.5793184638023376,
+ "learning_rate": 0.00018955065867122528,
+ "loss": 1.3629,
+ "step": 440
+ },
+ {
+ "epoch": 0.4411240661436029,
+ "grad_norm": 0.5549041628837585,
+ "learning_rate": 0.00018950395748304678,
+ "loss": 1.5557,
+ "step": 441
+ },
+ {
+ "epoch": 0.4421243474727267,
+ "grad_norm": 0.5406919121742249,
+ "learning_rate": 0.0001894571579462058,
+ "loss": 1.4441,
+ "step": 442
+ },
+ {
+ "epoch": 0.4431246288018505,
+ "grad_norm": 0.5131089091300964,
+ "learning_rate": 0.00018941026011212654,
+ "loss": 1.3051,
+ "step": 443
+ },
+ {
+ "epoch": 0.4441249101309743,
+ "grad_norm": 0.601586639881134,
+ "learning_rate": 0.00018936326403234125,
+ "loss": 1.5297,
+ "step": 444
+ },
+ {
+ "epoch": 0.44512519146009816,
+ "grad_norm": 0.5036457180976868,
+ "learning_rate": 0.00018931616975849006,
+ "loss": 1.357,
+ "step": 445
+ },
+ {
+ "epoch": 0.446125472789222,
+ "grad_norm": 0.5471266508102417,
+ "learning_rate": 0.00018926897734232115,
+ "loss": 1.2176,
+ "step": 446
+ },
+ {
+ "epoch": 0.44712575411834576,
+ "grad_norm": 0.6057867407798767,
+ "learning_rate": 0.0001892216868356904,
+ "loss": 1.4763,
+ "step": 447
+ },
+ {
+ "epoch": 0.4481260354474696,
+ "grad_norm": 0.5384593605995178,
+ "learning_rate": 0.0001891742982905615,
+ "loss": 1.513,
+ "step": 448
+ },
+ {
+ "epoch": 0.4491263167765934,
+ "grad_norm": 0.6144880056381226,
+ "learning_rate": 0.00018912681175900598,
+ "loss": 1.5782,
+ "step": 449
+ },
+ {
+ "epoch": 0.45012659810571726,
+ "grad_norm": 0.4838174879550934,
+ "learning_rate": 0.00018907922729320285,
+ "loss": 1.4085,
+ "step": 450
+ },
+ {
+ "epoch": 0.45112687943484103,
+ "grad_norm": 0.6852928400039673,
+ "learning_rate": 0.00018903154494543889,
+ "loss": 1.5989,
+ "step": 451
+ },
+ {
+ "epoch": 0.45212716076396486,
+ "grad_norm": 0.47527411580085754,
+ "learning_rate": 0.00018898376476810834,
+ "loss": 1.3409,
+ "step": 452
+ },
+ {
+ "epoch": 0.4531274420930887,
+ "grad_norm": 0.5665884613990784,
+ "learning_rate": 0.00018893588681371303,
+ "loss": 1.5395,
+ "step": 453
+ },
+ {
+ "epoch": 0.45412772342221247,
+ "grad_norm": 0.5792158246040344,
+ "learning_rate": 0.00018888791113486213,
+ "loss": 1.516,
+ "step": 454
+ },
+ {
+ "epoch": 0.4551280047513363,
+ "grad_norm": 0.5223523378372192,
+ "learning_rate": 0.00018883983778427227,
+ "loss": 1.3678,
+ "step": 455
+ },
+ {
+ "epoch": 0.45612828608046013,
+ "grad_norm": 0.5927590131759644,
+ "learning_rate": 0.0001887916668147673,
+ "loss": 1.3617,
+ "step": 456
+ },
+ {
+ "epoch": 0.45712856740958396,
+ "grad_norm": 0.7266496419906616,
+ "learning_rate": 0.00018874339827927846,
+ "loss": 1.3734,
+ "step": 457
+ },
+ {
+ "epoch": 0.45812884873870774,
+ "grad_norm": 0.6495805978775024,
+ "learning_rate": 0.00018869503223084414,
+ "loss": 1.5282,
+ "step": 458
+ },
+ {
+ "epoch": 0.45912913006783157,
+ "grad_norm": 0.6099816560745239,
+ "learning_rate": 0.00018864656872260985,
+ "loss": 1.4691,
+ "step": 459
+ },
+ {
+ "epoch": 0.4601294113969554,
+ "grad_norm": 0.5208227038383484,
+ "learning_rate": 0.00018859800780782828,
+ "loss": 1.3949,
+ "step": 460
+ },
+ {
+ "epoch": 0.46112969272607923,
+ "grad_norm": 0.5526600480079651,
+ "learning_rate": 0.000188549349539859,
+ "loss": 1.3557,
+ "step": 461
+ },
+ {
+ "epoch": 0.462129974055203,
+ "grad_norm": 0.5537740588188171,
+ "learning_rate": 0.00018850059397216876,
+ "loss": 1.4703,
+ "step": 462
+ },
+ {
+ "epoch": 0.46313025538432684,
+ "grad_norm": 0.5553976893424988,
+ "learning_rate": 0.00018845174115833099,
+ "loss": 1.4356,
+ "step": 463
+ },
+ {
+ "epoch": 0.46413053671345067,
+ "grad_norm": 0.6027779579162598,
+ "learning_rate": 0.0001884027911520262,
+ "loss": 1.4763,
+ "step": 464
+ },
+ {
+ "epoch": 0.4651308180425745,
+ "grad_norm": 0.5559154748916626,
+ "learning_rate": 0.00018835374400704154,
+ "loss": 1.4148,
+ "step": 465
+ },
+ {
+ "epoch": 0.4661310993716983,
+ "grad_norm": 0.6124109029769897,
+ "learning_rate": 0.00018830459977727096,
+ "loss": 1.4468,
+ "step": 466
+ },
+ {
+ "epoch": 0.4671313807008221,
+ "grad_norm": 0.4762580692768097,
+ "learning_rate": 0.0001882553585167151,
+ "loss": 1.3714,
+ "step": 467
+ },
+ {
+ "epoch": 0.46813166202994594,
+ "grad_norm": 0.5793487429618835,
+ "learning_rate": 0.00018820602027948114,
+ "loss": 1.4828,
+ "step": 468
+ },
+ {
+ "epoch": 0.4691319433590697,
+ "grad_norm": 0.55177241563797,
+ "learning_rate": 0.00018815658511978298,
+ "loss": 1.4157,
+ "step": 469
+ },
+ {
+ "epoch": 0.47013222468819355,
+ "grad_norm": 0.5065292716026306,
+ "learning_rate": 0.00018810705309194083,
+ "loss": 1.4519,
+ "step": 470
+ },
+ {
+ "epoch": 0.4711325060173174,
+ "grad_norm": 0.5401413440704346,
+ "learning_rate": 0.00018805742425038145,
+ "loss": 1.4344,
+ "step": 471
+ },
+ {
+ "epoch": 0.4721327873464412,
+ "grad_norm": 0.7173880338668823,
+ "learning_rate": 0.00018800769864963802,
+ "loss": 1.7325,
+ "step": 472
+ },
+ {
+ "epoch": 0.473133068675565,
+ "grad_norm": 0.507682204246521,
+ "learning_rate": 0.00018795787634434994,
+ "loss": 1.37,
+ "step": 473
+ },
+ {
+ "epoch": 0.4741333500046888,
+ "grad_norm": 0.551888644695282,
+ "learning_rate": 0.0001879079573892629,
+ "loss": 1.3695,
+ "step": 474
+ },
+ {
+ "epoch": 0.47513363133381264,
+ "grad_norm": 0.5109260082244873,
+ "learning_rate": 0.00018785794183922883,
+ "loss": 1.4001,
+ "step": 475
+ },
+ {
+ "epoch": 0.4761339126629365,
+ "grad_norm": 0.4565551280975342,
+ "learning_rate": 0.00018780782974920572,
+ "loss": 1.1752,
+ "step": 476
+ },
+ {
+ "epoch": 0.47713419399206025,
+ "grad_norm": 0.5651509761810303,
+ "learning_rate": 0.00018775762117425777,
+ "loss": 1.4291,
+ "step": 477
+ },
+ {
+ "epoch": 0.4781344753211841,
+ "grad_norm": 0.5827792286872864,
+ "learning_rate": 0.0001877073161695551,
+ "loss": 1.3438,
+ "step": 478
+ },
+ {
+ "epoch": 0.4791347566503079,
+ "grad_norm": 0.5719752907752991,
+ "learning_rate": 0.00018765691479037376,
+ "loss": 1.4683,
+ "step": 479
+ },
+ {
+ "epoch": 0.4801350379794317,
+ "grad_norm": 0.5153111815452576,
+ "learning_rate": 0.00018760641709209583,
+ "loss": 1.4392,
+ "step": 480
+ },
+ {
+ "epoch": 0.4811353193085555,
+ "grad_norm": 0.5455904603004456,
+ "learning_rate": 0.0001875558231302091,
+ "loss": 1.1603,
+ "step": 481
+ },
+ {
+ "epoch": 0.48213560063767935,
+ "grad_norm": 0.5857074856758118,
+ "learning_rate": 0.00018750513296030718,
+ "loss": 1.3099,
+ "step": 482
+ },
+ {
+ "epoch": 0.4831358819668032,
+ "grad_norm": 0.6051676273345947,
+ "learning_rate": 0.00018745434663808942,
+ "loss": 1.3587,
+ "step": 483
+ },
+ {
+ "epoch": 0.48413616329592696,
+ "grad_norm": 0.588749885559082,
+ "learning_rate": 0.0001874034642193608,
+ "loss": 1.5277,
+ "step": 484
+ },
+ {
+ "epoch": 0.4851364446250508,
+ "grad_norm": 0.5295410752296448,
+ "learning_rate": 0.0001873524857600319,
+ "loss": 1.2084,
+ "step": 485
+ },
+ {
+ "epoch": 0.4861367259541746,
+ "grad_norm": 0.5313368439674377,
+ "learning_rate": 0.00018730141131611882,
+ "loss": 1.4002,
+ "step": 486
+ },
+ {
+ "epoch": 0.48713700728329845,
+ "grad_norm": 0.5166353583335876,
+ "learning_rate": 0.00018725024094374315,
+ "loss": 1.208,
+ "step": 487
+ },
+ {
+ "epoch": 0.4881372886124222,
+ "grad_norm": 0.5478363037109375,
+ "learning_rate": 0.00018719897469913184,
+ "loss": 1.3236,
+ "step": 488
+ },
+ {
+ "epoch": 0.48913756994154606,
+ "grad_norm": 0.5531913042068481,
+ "learning_rate": 0.00018714761263861728,
+ "loss": 1.4938,
+ "step": 489
+ },
+ {
+ "epoch": 0.4901378512706699,
+ "grad_norm": 0.5334530472755432,
+ "learning_rate": 0.000187096154818637,
+ "loss": 1.4172,
+ "step": 490
+ },
+ {
+ "epoch": 0.4911381325997937,
+ "grad_norm": 0.5667001605033875,
+ "learning_rate": 0.00018704460129573391,
+ "loss": 1.3517,
+ "step": 491
+ },
+ {
+ "epoch": 0.4921384139289175,
+ "grad_norm": 0.5568780303001404,
+ "learning_rate": 0.00018699295212655596,
+ "loss": 1.4287,
+ "step": 492
+ },
+ {
+ "epoch": 0.4931386952580413,
+ "grad_norm": 0.6663610935211182,
+ "learning_rate": 0.00018694120736785632,
+ "loss": 1.5416,
+ "step": 493
+ },
+ {
+ "epoch": 0.49413897658716516,
+ "grad_norm": 0.5753045082092285,
+ "learning_rate": 0.00018688936707649304,
+ "loss": 1.5552,
+ "step": 494
+ },
+ {
+ "epoch": 0.49513925791628893,
+ "grad_norm": 0.5707410573959351,
+ "learning_rate": 0.00018683743130942928,
+ "loss": 1.5332,
+ "step": 495
+ },
+ {
+ "epoch": 0.49613953924541276,
+ "grad_norm": 0.5847951173782349,
+ "learning_rate": 0.00018678540012373302,
+ "loss": 1.3488,
+ "step": 496
+ },
+ {
+ "epoch": 0.4971398205745366,
+ "grad_norm": 0.60503751039505,
+ "learning_rate": 0.00018673327357657715,
+ "loss": 1.3924,
+ "step": 497
+ },
+ {
+ "epoch": 0.4981401019036604,
+ "grad_norm": 0.635142982006073,
+ "learning_rate": 0.0001866810517252393,
+ "loss": 1.4392,
+ "step": 498
+ },
+ {
+ "epoch": 0.4991403832327842,
+ "grad_norm": 0.5536782741546631,
+ "learning_rate": 0.00018662873462710184,
+ "loss": 1.286,
+ "step": 499
+ },
+ {
+ "epoch": 0.5001406645619081,
+ "grad_norm": 0.5676659345626831,
+ "learning_rate": 0.0001865763223396518,
+ "loss": 1.3006,
+ "step": 500
+ },
+ {
+ "epoch": 0.5011409458910319,
+ "grad_norm": 0.5546663403511047,
+ "learning_rate": 0.00018652381492048083,
+ "loss": 1.418,
+ "step": 501
+ },
+ {
+ "epoch": 0.5021412272201556,
+ "grad_norm": 0.5137162804603577,
+ "learning_rate": 0.00018647121242728506,
+ "loss": 1.3173,
+ "step": 502
+ },
+ {
+ "epoch": 0.5031415085492795,
+ "grad_norm": 0.5474348068237305,
+ "learning_rate": 0.00018641851491786512,
+ "loss": 1.6652,
+ "step": 503
+ },
+ {
+ "epoch": 0.5041417898784033,
+ "grad_norm": 0.5563383102416992,
+ "learning_rate": 0.00018636572245012606,
+ "loss": 1.4519,
+ "step": 504
+ },
+ {
+ "epoch": 0.5051420712075271,
+ "grad_norm": 0.5621083974838257,
+ "learning_rate": 0.00018631283508207725,
+ "loss": 1.5418,
+ "step": 505
+ },
+ {
+ "epoch": 0.506142352536651,
+ "grad_norm": 0.49915972352027893,
+ "learning_rate": 0.00018625985287183233,
+ "loss": 1.2969,
+ "step": 506
+ },
+ {
+ "epoch": 0.5071426338657747,
+ "grad_norm": 0.601996660232544,
+ "learning_rate": 0.00018620677587760916,
+ "loss": 1.4483,
+ "step": 507
+ },
+ {
+ "epoch": 0.5081429151948985,
+ "grad_norm": 0.5594652891159058,
+ "learning_rate": 0.00018615360415772978,
+ "loss": 1.4094,
+ "step": 508
+ },
+ {
+ "epoch": 0.5091431965240224,
+ "grad_norm": 0.557381808757782,
+ "learning_rate": 0.00018610033777062025,
+ "loss": 1.216,
+ "step": 509
+ },
+ {
+ "epoch": 0.5101434778531462,
+ "grad_norm": 0.5841740369796753,
+ "learning_rate": 0.0001860469767748108,
+ "loss": 1.4924,
+ "step": 510
+ },
+ {
+ "epoch": 0.5111437591822701,
+ "grad_norm": 0.4968324899673462,
+ "learning_rate": 0.00018599352122893539,
+ "loss": 1.2474,
+ "step": 511
+ },
+ {
+ "epoch": 0.5121440405113938,
+ "grad_norm": 0.5390318632125854,
+ "learning_rate": 0.00018593997119173205,
+ "loss": 1.4484,
+ "step": 512
+ },
+ {
+ "epoch": 0.5131443218405176,
+ "grad_norm": 0.6626128554344177,
+ "learning_rate": 0.00018588632672204264,
+ "loss": 1.5664,
+ "step": 513
+ },
+ {
+ "epoch": 0.5141446031696415,
+ "grad_norm": 0.6183133721351624,
+ "learning_rate": 0.0001858325878788126,
+ "loss": 1.5603,
+ "step": 514
+ },
+ {
+ "epoch": 0.5151448844987653,
+ "grad_norm": 0.5574773550033569,
+ "learning_rate": 0.00018577875472109134,
+ "loss": 1.3668,
+ "step": 515
+ },
+ {
+ "epoch": 0.516145165827889,
+ "grad_norm": 0.5127518773078918,
+ "learning_rate": 0.0001857248273080317,
+ "loss": 1.264,
+ "step": 516
+ },
+ {
+ "epoch": 0.5171454471570129,
+ "grad_norm": 0.6540619134902954,
+ "learning_rate": 0.00018567080569889015,
+ "loss": 1.3091,
+ "step": 517
+ },
+ {
+ "epoch": 0.5181457284861367,
+ "grad_norm": 0.5286336541175842,
+ "learning_rate": 0.00018561668995302667,
+ "loss": 1.3581,
+ "step": 518
+ },
+ {
+ "epoch": 0.5191460098152605,
+ "grad_norm": 0.6609972715377808,
+ "learning_rate": 0.00018556248012990468,
+ "loss": 1.3123,
+ "step": 519
+ },
+ {
+ "epoch": 0.5201462911443844,
+ "grad_norm": 0.48230236768722534,
+ "learning_rate": 0.000185508176289091,
+ "loss": 1.2372,
+ "step": 520
+ },
+ {
+ "epoch": 0.5211465724735082,
+ "grad_norm": 0.5173765420913696,
+ "learning_rate": 0.00018545377849025566,
+ "loss": 1.327,
+ "step": 521
+ },
+ {
+ "epoch": 0.522146853802632,
+ "grad_norm": 0.5822583436965942,
+ "learning_rate": 0.0001853992867931721,
+ "loss": 1.3851,
+ "step": 522
+ },
+ {
+ "epoch": 0.5231471351317558,
+ "grad_norm": 0.6025621891021729,
+ "learning_rate": 0.00018534470125771674,
+ "loss": 1.5627,
+ "step": 523
+ },
+ {
+ "epoch": 0.5241474164608796,
+ "grad_norm": 0.5516778230667114,
+ "learning_rate": 0.0001852900219438693,
+ "loss": 1.4036,
+ "step": 524
+ },
+ {
+ "epoch": 0.5251476977900035,
+ "grad_norm": 0.5738380551338196,
+ "learning_rate": 0.0001852352489117124,
+ "loss": 1.5042,
+ "step": 525
+ },
+ {
+ "epoch": 0.5261479791191273,
+ "grad_norm": 0.6360776424407959,
+ "learning_rate": 0.00018518038222143174,
+ "loss": 1.4101,
+ "step": 526
+ },
+ {
+ "epoch": 0.527148260448251,
+ "grad_norm": 0.5776675939559937,
+ "learning_rate": 0.00018512542193331583,
+ "loss": 1.6015,
+ "step": 527
+ },
+ {
+ "epoch": 0.5281485417773749,
+ "grad_norm": 0.5662726759910583,
+ "learning_rate": 0.00018507036810775615,
+ "loss": 1.3186,
+ "step": 528
+ },
+ {
+ "epoch": 0.5291488231064987,
+ "grad_norm": 0.6518335938453674,
+ "learning_rate": 0.00018501522080524688,
+ "loss": 1.4882,
+ "step": 529
+ },
+ {
+ "epoch": 0.5301491044356225,
+ "grad_norm": 0.5475590825080872,
+ "learning_rate": 0.0001849599800863849,
+ "loss": 1.487,
+ "step": 530
+ },
+ {
+ "epoch": 0.5311493857647464,
+ "grad_norm": 0.6275209188461304,
+ "learning_rate": 0.0001849046460118698,
+ "loss": 1.3563,
+ "step": 531
+ },
+ {
+ "epoch": 0.5321496670938701,
+ "grad_norm": 0.5629132390022278,
+ "learning_rate": 0.0001848492186425037,
+ "loss": 1.516,
+ "step": 532
+ },
+ {
+ "epoch": 0.533149948422994,
+ "grad_norm": 0.5251057744026184,
+ "learning_rate": 0.0001847936980391913,
+ "loss": 1.5254,
+ "step": 533
+ },
+ {
+ "epoch": 0.5341502297521178,
+ "grad_norm": 0.5635396838188171,
+ "learning_rate": 0.00018473808426293964,
+ "loss": 1.3408,
+ "step": 534
+ },
+ {
+ "epoch": 0.5351505110812416,
+ "grad_norm": 0.527082622051239,
+ "learning_rate": 0.00018468237737485823,
+ "loss": 1.2664,
+ "step": 535
+ },
+ {
+ "epoch": 0.5361507924103655,
+ "grad_norm": 0.6555044054985046,
+ "learning_rate": 0.00018462657743615888,
+ "loss": 1.464,
+ "step": 536
+ },
+ {
+ "epoch": 0.5371510737394892,
+ "grad_norm": 0.5468676686286926,
+ "learning_rate": 0.00018457068450815562,
+ "loss": 1.3733,
+ "step": 537
+ },
+ {
+ "epoch": 0.538151355068613,
+ "grad_norm": 0.5662835836410522,
+ "learning_rate": 0.00018451469865226464,
+ "loss": 1.509,
+ "step": 538
+ },
+ {
+ "epoch": 0.5391516363977369,
+ "grad_norm": 0.5553548336029053,
+ "learning_rate": 0.00018445861993000436,
+ "loss": 1.2476,
+ "step": 539
+ },
+ {
+ "epoch": 0.5401519177268607,
+ "grad_norm": 0.6240925192832947,
+ "learning_rate": 0.00018440244840299506,
+ "loss": 1.5835,
+ "step": 540
+ },
+ {
+ "epoch": 0.5411521990559846,
+ "grad_norm": 0.6107541918754578,
+ "learning_rate": 0.0001843461841329591,
+ "loss": 1.7176,
+ "step": 541
+ },
+ {
+ "epoch": 0.5421524803851083,
+ "grad_norm": 0.6990326642990112,
+ "learning_rate": 0.0001842898271817208,
+ "loss": 1.4235,
+ "step": 542
+ },
+ {
+ "epoch": 0.5431527617142321,
+ "grad_norm": 0.583871603012085,
+ "learning_rate": 0.00018423337761120618,
+ "loss": 1.5283,
+ "step": 543
+ },
+ {
+ "epoch": 0.544153043043356,
+ "grad_norm": 0.5585455894470215,
+ "learning_rate": 0.00018417683548344318,
+ "loss": 1.4875,
+ "step": 544
+ },
+ {
+ "epoch": 0.5451533243724798,
+ "grad_norm": 0.5199955701828003,
+ "learning_rate": 0.00018412020086056133,
+ "loss": 1.3989,
+ "step": 545
+ },
+ {
+ "epoch": 0.5461536057016035,
+ "grad_norm": 0.5517343878746033,
+ "learning_rate": 0.0001840634738047918,
+ "loss": 1.4073,
+ "step": 546
+ },
+ {
+ "epoch": 0.5471538870307274,
+ "grad_norm": 0.7140716314315796,
+ "learning_rate": 0.0001840066543784675,
+ "loss": 1.4477,
+ "step": 547
+ },
+ {
+ "epoch": 0.5481541683598512,
+ "grad_norm": 0.548422634601593,
+ "learning_rate": 0.00018394974264402257,
+ "loss": 1.4198,
+ "step": 548
+ },
+ {
+ "epoch": 0.549154449688975,
+ "grad_norm": 0.5907624363899231,
+ "learning_rate": 0.00018389273866399275,
+ "loss": 1.4033,
+ "step": 549
+ },
+ {
+ "epoch": 0.5501547310180989,
+ "grad_norm": 0.5327603220939636,
+ "learning_rate": 0.00018383564250101512,
+ "loss": 1.2674,
+ "step": 550
+ },
+ {
+ "epoch": 0.5511550123472226,
+ "grad_norm": 0.4678132236003876,
+ "learning_rate": 0.000183778454217828,
+ "loss": 1.3644,
+ "step": 551
+ },
+ {
+ "epoch": 0.5521552936763465,
+ "grad_norm": 0.674040675163269,
+ "learning_rate": 0.0001837211738772711,
+ "loss": 1.6942,
+ "step": 552
+ },
+ {
+ "epoch": 0.5531555750054703,
+ "grad_norm": 0.5374539494514465,
+ "learning_rate": 0.000183663801542285,
+ "loss": 1.1887,
+ "step": 553
+ },
+ {
+ "epoch": 0.5541558563345941,
+ "grad_norm": 0.5528072118759155,
+ "learning_rate": 0.00018360633727591155,
+ "loss": 1.2,
+ "step": 554
+ },
+ {
+ "epoch": 0.555156137663718,
+ "grad_norm": 0.6597411632537842,
+ "learning_rate": 0.00018354878114129367,
+ "loss": 1.402,
+ "step": 555
+ },
+ {
+ "epoch": 0.5561564189928417,
+ "grad_norm": 0.5931501388549805,
+ "learning_rate": 0.00018349113320167504,
+ "loss": 1.5583,
+ "step": 556
+ },
+ {
+ "epoch": 0.5571567003219655,
+ "grad_norm": 0.6331121921539307,
+ "learning_rate": 0.00018343339352040042,
+ "loss": 1.7882,
+ "step": 557
+ },
+ {
+ "epoch": 0.5581569816510894,
+ "grad_norm": 0.5221824645996094,
+ "learning_rate": 0.00018337556216091517,
+ "loss": 1.2457,
+ "step": 558
+ },
+ {
+ "epoch": 0.5591572629802132,
+ "grad_norm": 0.6008853912353516,
+ "learning_rate": 0.00018331763918676556,
+ "loss": 1.5916,
+ "step": 559
+ },
+ {
+ "epoch": 0.560157544309337,
+ "grad_norm": 0.5409006476402283,
+ "learning_rate": 0.00018325962466159848,
+ "loss": 1.3457,
+ "step": 560
+ },
+ {
+ "epoch": 0.5611578256384608,
+ "grad_norm": 0.5095859169960022,
+ "learning_rate": 0.00018320151864916135,
+ "loss": 1.3622,
+ "step": 561
+ },
+ {
+ "epoch": 0.5621581069675846,
+ "grad_norm": 0.5716331005096436,
+ "learning_rate": 0.00018314332121330225,
+ "loss": 1.6168,
+ "step": 562
+ },
+ {
+ "epoch": 0.5631583882967085,
+ "grad_norm": 0.600307047367096,
+ "learning_rate": 0.0001830850324179695,
+ "loss": 1.4117,
+ "step": 563
+ },
+ {
+ "epoch": 0.5641586696258323,
+ "grad_norm": 0.7528484463691711,
+ "learning_rate": 0.00018302665232721208,
+ "loss": 1.3418,
+ "step": 564
+ },
+ {
+ "epoch": 0.565158950954956,
+ "grad_norm": 0.6119087338447571,
+ "learning_rate": 0.0001829681810051791,
+ "loss": 1.4908,
+ "step": 565
+ },
+ {
+ "epoch": 0.5661592322840799,
+ "grad_norm": 0.6440190672874451,
+ "learning_rate": 0.00018290961851611995,
+ "loss": 1.3511,
+ "step": 566
+ },
+ {
+ "epoch": 0.5671595136132037,
+ "grad_norm": 0.647294282913208,
+ "learning_rate": 0.00018285096492438424,
+ "loss": 1.5165,
+ "step": 567
+ },
+ {
+ "epoch": 0.5681597949423275,
+ "grad_norm": 0.5499668717384338,
+ "learning_rate": 0.00018279222029442163,
+ "loss": 1.2876,
+ "step": 568
+ },
+ {
+ "epoch": 0.5691600762714514,
+ "grad_norm": 0.5629482865333557,
+ "learning_rate": 0.00018273338469078186,
+ "loss": 1.2256,
+ "step": 569
+ },
+ {
+ "epoch": 0.5701603576005752,
+ "grad_norm": 0.48661288619041443,
+ "learning_rate": 0.00018267445817811466,
+ "loss": 1.44,
+ "step": 570
+ },
+ {
+ "epoch": 0.5711606389296989,
+ "grad_norm": 0.5713567733764648,
+ "learning_rate": 0.00018261544082116954,
+ "loss": 1.741,
+ "step": 571
+ },
+ {
+ "epoch": 0.5721609202588228,
+ "grad_norm": 0.6130850315093994,
+ "learning_rate": 0.00018255633268479595,
+ "loss": 1.526,
+ "step": 572
+ },
+ {
+ "epoch": 0.5731612015879466,
+ "grad_norm": 0.5415536761283875,
+ "learning_rate": 0.00018249713383394303,
+ "loss": 1.2405,
+ "step": 573
+ },
+ {
+ "epoch": 0.5741614829170705,
+ "grad_norm": 0.600574791431427,
+ "learning_rate": 0.0001824378443336596,
+ "loss": 1.4534,
+ "step": 574
+ },
+ {
+ "epoch": 0.5751617642461943,
+ "grad_norm": 0.5479387044906616,
+ "learning_rate": 0.00018237846424909413,
+ "loss": 1.4277,
+ "step": 575
+ },
+ {
+ "epoch": 0.576162045575318,
+ "grad_norm": 0.5536132454872131,
+ "learning_rate": 0.00018231899364549455,
+ "loss": 1.3918,
+ "step": 576
+ },
+ {
+ "epoch": 0.5771623269044419,
+ "grad_norm": 0.6228598356246948,
+ "learning_rate": 0.00018225943258820833,
+ "loss": 1.413,
+ "step": 577
+ },
+ {
+ "epoch": 0.5781626082335657,
+ "grad_norm": 0.5498123168945312,
+ "learning_rate": 0.00018219978114268227,
+ "loss": 1.3558,
+ "step": 578
+ },
+ {
+ "epoch": 0.5791628895626895,
+ "grad_norm": 0.5427498817443848,
+ "learning_rate": 0.00018214003937446253,
+ "loss": 1.509,
+ "step": 579
+ },
+ {
+ "epoch": 0.5801631708918134,
+ "grad_norm": 0.522285521030426,
+ "learning_rate": 0.00018208020734919455,
+ "loss": 1.3847,
+ "step": 580
+ },
+ {
+ "epoch": 0.5811634522209371,
+ "grad_norm": 0.5963860750198364,
+ "learning_rate": 0.00018202028513262288,
+ "loss": 1.4605,
+ "step": 581
+ },
+ {
+ "epoch": 0.5821637335500609,
+ "grad_norm": 0.4854499101638794,
+ "learning_rate": 0.00018196027279059117,
+ "loss": 1.4968,
+ "step": 582
+ },
+ {
+ "epoch": 0.5831640148791848,
+ "grad_norm": 0.503466010093689,
+ "learning_rate": 0.00018190017038904215,
+ "loss": 1.2568,
+ "step": 583
+ },
+ {
+ "epoch": 0.5841642962083086,
+ "grad_norm": 0.6027483940124512,
+ "learning_rate": 0.0001818399779940175,
+ "loss": 1.5744,
+ "step": 584
+ },
+ {
+ "epoch": 0.5851645775374325,
+ "grad_norm": 0.5450258851051331,
+ "learning_rate": 0.0001817796956716578,
+ "loss": 1.2672,
+ "step": 585
+ },
+ {
+ "epoch": 0.5861648588665562,
+ "grad_norm": 0.5376724600791931,
+ "learning_rate": 0.00018171932348820234,
+ "loss": 1.5099,
+ "step": 586
+ },
+ {
+ "epoch": 0.58716514019568,
+ "grad_norm": 0.513921856880188,
+ "learning_rate": 0.0001816588615099893,
+ "loss": 1.3213,
+ "step": 587
+ },
+ {
+ "epoch": 0.5881654215248039,
+ "grad_norm": 0.7540159225463867,
+ "learning_rate": 0.00018159830980345548,
+ "loss": 1.2231,
+ "step": 588
+ },
+ {
+ "epoch": 0.5891657028539277,
+ "grad_norm": 0.5917702317237854,
+ "learning_rate": 0.0001815376684351362,
+ "loss": 1.6094,
+ "step": 589
+ },
+ {
+ "epoch": 0.5901659841830514,
+ "grad_norm": 0.5507463216781616,
+ "learning_rate": 0.00018147693747166534,
+ "loss": 1.3904,
+ "step": 590
+ },
+ {
+ "epoch": 0.5911662655121753,
+ "grad_norm": 0.545695960521698,
+ "learning_rate": 0.00018141611697977529,
+ "loss": 1.5172,
+ "step": 591
+ },
+ {
+ "epoch": 0.5921665468412991,
+ "grad_norm": 0.5876530408859253,
+ "learning_rate": 0.00018135520702629675,
+ "loss": 1.3676,
+ "step": 592
+ },
+ {
+ "epoch": 0.5931668281704229,
+ "grad_norm": 0.5510894060134888,
+ "learning_rate": 0.0001812942076781588,
+ "loss": 1.4379,
+ "step": 593
+ },
+ {
+ "epoch": 0.5941671094995468,
+ "grad_norm": 0.5105913877487183,
+ "learning_rate": 0.0001812331190023886,
+ "loss": 1.3687,
+ "step": 594
+ },
+ {
+ "epoch": 0.5951673908286705,
+ "grad_norm": 0.47876060009002686,
+ "learning_rate": 0.0001811719410661116,
+ "loss": 1.3178,
+ "step": 595
+ },
+ {
+ "epoch": 0.5961676721577944,
+ "grad_norm": 0.6079074144363403,
+ "learning_rate": 0.00018111067393655132,
+ "loss": 1.4713,
+ "step": 596
+ },
+ {
+ "epoch": 0.5971679534869182,
+ "grad_norm": 0.5363487601280212,
+ "learning_rate": 0.0001810493176810292,
+ "loss": 1.1868,
+ "step": 597
+ },
+ {
+ "epoch": 0.598168234816042,
+ "grad_norm": 0.5252292156219482,
+ "learning_rate": 0.00018098787236696474,
+ "loss": 1.303,
+ "step": 598
+ },
+ {
+ "epoch": 0.5991685161451659,
+ "grad_norm": 0.5377137064933777,
+ "learning_rate": 0.00018092633806187513,
+ "loss": 1.3653,
+ "step": 599
+ },
+ {
+ "epoch": 0.6001687974742896,
+ "grad_norm": 0.5274302363395691,
+ "learning_rate": 0.0001808647148333755,
+ "loss": 1.3693,
+ "step": 600
+ },
+ {
+ "epoch": 0.6011690788034134,
+ "grad_norm": 0.5664658546447754,
+ "learning_rate": 0.00018080300274917862,
+ "loss": 1.3807,
+ "step": 601
+ },
+ {
+ "epoch": 0.6021693601325373,
+ "grad_norm": 0.6609538197517395,
+ "learning_rate": 0.00018074120187709495,
+ "loss": 1.5015,
+ "step": 602
+ },
+ {
+ "epoch": 0.6031696414616611,
+ "grad_norm": 0.4943195879459381,
+ "learning_rate": 0.00018067931228503246,
+ "loss": 1.4436,
+ "step": 603
+ },
+ {
+ "epoch": 0.604169922790785,
+ "grad_norm": 0.549712598323822,
+ "learning_rate": 0.00018061733404099655,
+ "loss": 1.455,
+ "step": 604
+ },
+ {
+ "epoch": 0.6051702041199087,
+ "grad_norm": 0.5765941143035889,
+ "learning_rate": 0.00018055526721309016,
+ "loss": 1.3317,
+ "step": 605
+ },
+ {
+ "epoch": 0.6061704854490325,
+ "grad_norm": 0.5223068594932556,
+ "learning_rate": 0.0001804931118695135,
+ "loss": 1.3456,
+ "step": 606
+ },
+ {
+ "epoch": 0.6071707667781564,
+ "grad_norm": 0.5385129451751709,
+ "learning_rate": 0.00018043086807856403,
+ "loss": 1.3388,
+ "step": 607
+ },
+ {
+ "epoch": 0.6081710481072802,
+ "grad_norm": 0.5244528651237488,
+ "learning_rate": 0.00018036853590863648,
+ "loss": 1.398,
+ "step": 608
+ },
+ {
+ "epoch": 0.609171329436404,
+ "grad_norm": 0.5274112224578857,
+ "learning_rate": 0.00018030611542822257,
+ "loss": 1.3105,
+ "step": 609
+ },
+ {
+ "epoch": 0.6101716107655278,
+ "grad_norm": 0.5351893305778503,
+ "learning_rate": 0.00018024360670591114,
+ "loss": 1.3128,
+ "step": 610
+ },
+ {
+ "epoch": 0.6111718920946516,
+ "grad_norm": 0.5729460120201111,
+ "learning_rate": 0.00018018100981038798,
+ "loss": 1.3606,
+ "step": 611
+ },
+ {
+ "epoch": 0.6121721734237754,
+ "grad_norm": 0.5494408011436462,
+ "learning_rate": 0.00018011832481043576,
+ "loss": 1.4517,
+ "step": 612
+ },
+ {
+ "epoch": 0.6131724547528993,
+ "grad_norm": 0.5205882787704468,
+ "learning_rate": 0.00018005555177493394,
+ "loss": 1.4943,
+ "step": 613
+ },
+ {
+ "epoch": 0.614172736082023,
+ "grad_norm": 0.5488479137420654,
+ "learning_rate": 0.00017999269077285875,
+ "loss": 1.3939,
+ "step": 614
+ },
+ {
+ "epoch": 0.6151730174111469,
+ "grad_norm": 0.5779786109924316,
+ "learning_rate": 0.00017992974187328305,
+ "loss": 1.5744,
+ "step": 615
+ },
+ {
+ "epoch": 0.6161732987402707,
+ "grad_norm": 0.5576769113540649,
+ "learning_rate": 0.00017986670514537627,
+ "loss": 1.2284,
+ "step": 616
+ },
+ {
+ "epoch": 0.6171735800693945,
+ "grad_norm": 0.4912784993648529,
+ "learning_rate": 0.00017980358065840444,
+ "loss": 1.292,
+ "step": 617
+ },
+ {
+ "epoch": 0.6181738613985184,
+ "grad_norm": 0.657666027545929,
+ "learning_rate": 0.0001797403684817299,
+ "loss": 1.4918,
+ "step": 618
+ },
+ {
+ "epoch": 0.6191741427276422,
+ "grad_norm": 0.5642833113670349,
+ "learning_rate": 0.00017967706868481144,
+ "loss": 1.4718,
+ "step": 619
+ },
+ {
+ "epoch": 0.6201744240567659,
+ "grad_norm": 0.7243106961250305,
+ "learning_rate": 0.00017961368133720407,
+ "loss": 1.4342,
+ "step": 620
+ },
+ {
+ "epoch": 0.6211747053858898,
+ "grad_norm": 0.4982456564903259,
+ "learning_rate": 0.000179550206508559,
+ "loss": 1.4478,
+ "step": 621
+ },
+ {
+ "epoch": 0.6221749867150136,
+ "grad_norm": 0.5249592065811157,
+ "learning_rate": 0.00017948664426862364,
+ "loss": 1.485,
+ "step": 622
+ },
+ {
+ "epoch": 0.6231752680441374,
+ "grad_norm": 0.6167681217193604,
+ "learning_rate": 0.00017942299468724134,
+ "loss": 1.4813,
+ "step": 623
+ },
+ {
+ "epoch": 0.6241755493732613,
+ "grad_norm": 0.5300460457801819,
+ "learning_rate": 0.0001793592578343515,
+ "loss": 1.1364,
+ "step": 624
+ },
+ {
+ "epoch": 0.625175830702385,
+ "grad_norm": 0.5908417105674744,
+ "learning_rate": 0.0001792954337799894,
+ "loss": 1.4402,
+ "step": 625
+ },
+ {
+ "epoch": 0.6261761120315089,
+ "grad_norm": 0.5684035420417786,
+ "learning_rate": 0.00017923152259428612,
+ "loss": 1.4847,
+ "step": 626
+ },
+ {
+ "epoch": 0.6271763933606327,
+ "grad_norm": 0.5421493053436279,
+ "learning_rate": 0.00017916752434746856,
+ "loss": 1.3348,
+ "step": 627
+ },
+ {
+ "epoch": 0.6281766746897565,
+ "grad_norm": 0.5295160412788391,
+ "learning_rate": 0.0001791034391098591,
+ "loss": 1.4703,
+ "step": 628
+ },
+ {
+ "epoch": 0.6291769560188804,
+ "grad_norm": 0.5196051001548767,
+ "learning_rate": 0.00017903926695187595,
+ "loss": 1.3478,
+ "step": 629
+ },
+ {
+ "epoch": 0.6301772373480041,
+ "grad_norm": 0.4994469881057739,
+ "learning_rate": 0.0001789750079440326,
+ "loss": 1.2368,
+ "step": 630
+ },
+ {
+ "epoch": 0.6311775186771279,
+ "grad_norm": 0.5117055177688599,
+ "learning_rate": 0.00017891066215693817,
+ "loss": 1.3429,
+ "step": 631
+ },
+ {
+ "epoch": 0.6321778000062518,
+ "grad_norm": 0.49438026547431946,
+ "learning_rate": 0.00017884622966129695,
+ "loss": 1.301,
+ "step": 632
+ },
+ {
+ "epoch": 0.6331780813353756,
+ "grad_norm": 0.6113334894180298,
+ "learning_rate": 0.00017878171052790868,
+ "loss": 1.4636,
+ "step": 633
+ },
+ {
+ "epoch": 0.6341783626644993,
+ "grad_norm": 0.6063141822814941,
+ "learning_rate": 0.00017871710482766817,
+ "loss": 1.2262,
+ "step": 634
+ },
+ {
+ "epoch": 0.6351786439936232,
+ "grad_norm": 0.5604403614997864,
+ "learning_rate": 0.00017865241263156546,
+ "loss": 1.4112,
+ "step": 635
+ },
+ {
+ "epoch": 0.636178925322747,
+ "grad_norm": 0.523415207862854,
+ "learning_rate": 0.0001785876340106855,
+ "loss": 1.3281,
+ "step": 636
+ },
+ {
+ "epoch": 0.6371792066518709,
+ "grad_norm": 0.5602991580963135,
+ "learning_rate": 0.0001785227690362083,
+ "loss": 1.44,
+ "step": 637
+ },
+ {
+ "epoch": 0.6381794879809947,
+ "grad_norm": 0.46946853399276733,
+ "learning_rate": 0.00017845781777940878,
+ "loss": 1.2956,
+ "step": 638
+ },
+ {
+ "epoch": 0.6391797693101184,
+ "grad_norm": 0.5586503744125366,
+ "learning_rate": 0.00017839278031165658,
+ "loss": 1.5419,
+ "step": 639
+ },
+ {
+ "epoch": 0.6401800506392423,
+ "grad_norm": 0.5270752310752869,
+ "learning_rate": 0.00017832765670441612,
+ "loss": 1.305,
+ "step": 640
+ },
+ {
+ "epoch": 0.6411803319683661,
+ "grad_norm": 0.57756108045578,
+ "learning_rate": 0.0001782624470292465,
+ "loss": 1.2145,
+ "step": 641
+ },
+ {
+ "epoch": 0.6421806132974899,
+ "grad_norm": 0.5709058046340942,
+ "learning_rate": 0.0001781971513578013,
+ "loss": 1.4804,
+ "step": 642
+ },
+ {
+ "epoch": 0.6431808946266138,
+ "grad_norm": 0.505849301815033,
+ "learning_rate": 0.00017813176976182873,
+ "loss": 1.3964,
+ "step": 643
+ },
+ {
+ "epoch": 0.6441811759557375,
+ "grad_norm": 0.5171617269515991,
+ "learning_rate": 0.00017806630231317127,
+ "loss": 1.3283,
+ "step": 644
+ },
+ {
+ "epoch": 0.6451814572848613,
+ "grad_norm": 0.5567512512207031,
+ "learning_rate": 0.00017800074908376584,
+ "loss": 1.481,
+ "step": 645
+ },
+ {
+ "epoch": 0.6461817386139852,
+ "grad_norm": 0.5000666379928589,
+ "learning_rate": 0.00017793511014564358,
+ "loss": 1.2856,
+ "step": 646
+ },
+ {
+ "epoch": 0.647182019943109,
+ "grad_norm": 0.49550777673721313,
+ "learning_rate": 0.00017786938557092983,
+ "loss": 1.3447,
+ "step": 647
+ },
+ {
+ "epoch": 0.6481823012722329,
+ "grad_norm": 0.5904624462127686,
+ "learning_rate": 0.00017780357543184397,
+ "loss": 1.241,
+ "step": 648
+ },
+ {
+ "epoch": 0.6491825826013566,
+ "grad_norm": 0.4615901708602905,
+ "learning_rate": 0.00017773767980069945,
+ "loss": 1.3436,
+ "step": 649
+ },
+ {
+ "epoch": 0.6501828639304804,
+ "grad_norm": 0.48083069920539856,
+ "learning_rate": 0.0001776716987499037,
+ "loss": 1.3906,
+ "step": 650
+ },
+ {
+ "epoch": 0.6511831452596043,
+ "grad_norm": 0.4525931775569916,
+ "learning_rate": 0.0001776056323519579,
+ "loss": 1.3417,
+ "step": 651
+ },
+ {
+ "epoch": 0.6521834265887281,
+ "grad_norm": 0.6179555058479309,
+ "learning_rate": 0.00017753948067945712,
+ "loss": 1.3438,
+ "step": 652
+ },
+ {
+ "epoch": 0.6531837079178519,
+ "grad_norm": 0.5525293946266174,
+ "learning_rate": 0.00017747324380509006,
+ "loss": 1.4551,
+ "step": 653
+ },
+ {
+ "epoch": 0.6541839892469757,
+ "grad_norm": 0.533028781414032,
+ "learning_rate": 0.00017740692180163908,
+ "loss": 1.4396,
+ "step": 654
+ },
+ {
+ "epoch": 0.6551842705760995,
+ "grad_norm": 0.5196881890296936,
+ "learning_rate": 0.00017734051474198003,
+ "loss": 1.3032,
+ "step": 655
+ },
+ {
+ "epoch": 0.6561845519052233,
+ "grad_norm": 0.5190469622612,
+ "learning_rate": 0.0001772740226990823,
+ "loss": 1.4049,
+ "step": 656
+ },
+ {
+ "epoch": 0.6571848332343472,
+ "grad_norm": 0.49517175555229187,
+ "learning_rate": 0.00017720744574600863,
+ "loss": 1.3696,
+ "step": 657
+ },
+ {
+ "epoch": 0.658185114563471,
+ "grad_norm": 0.5165138244628906,
+ "learning_rate": 0.00017714078395591502,
+ "loss": 1.3667,
+ "step": 658
+ },
+ {
+ "epoch": 0.6591853958925948,
+ "grad_norm": 0.5624507665634155,
+ "learning_rate": 0.00017707403740205071,
+ "loss": 1.2109,
+ "step": 659
+ },
+ {
+ "epoch": 0.6601856772217186,
+ "grad_norm": 0.45942649245262146,
+ "learning_rate": 0.00017700720615775812,
+ "loss": 1.259,
+ "step": 660
+ },
+ {
+ "epoch": 0.6611859585508424,
+ "grad_norm": 0.5019019842147827,
+ "learning_rate": 0.0001769402902964727,
+ "loss": 1.3739,
+ "step": 661
+ },
+ {
+ "epoch": 0.6621862398799663,
+ "grad_norm": 0.4661652743816376,
+ "learning_rate": 0.00017687328989172288,
+ "loss": 1.2606,
+ "step": 662
+ },
+ {
+ "epoch": 0.66318652120909,
+ "grad_norm": 0.5310545563697815,
+ "learning_rate": 0.00017680620501712996,
+ "loss": 1.3406,
+ "step": 663
+ },
+ {
+ "epoch": 0.6641868025382138,
+ "grad_norm": 0.5190532207489014,
+ "learning_rate": 0.00017673903574640814,
+ "loss": 1.3052,
+ "step": 664
+ },
+ {
+ "epoch": 0.6651870838673377,
+ "grad_norm": 0.5265533328056335,
+ "learning_rate": 0.00017667178215336423,
+ "loss": 1.2326,
+ "step": 665
+ },
+ {
+ "epoch": 0.6661873651964615,
+ "grad_norm": 0.5971291065216064,
+ "learning_rate": 0.0001766044443118978,
+ "loss": 1.4291,
+ "step": 666
+ },
+ {
+ "epoch": 0.6671876465255854,
+ "grad_norm": 0.5295760631561279,
+ "learning_rate": 0.000176537022296001,
+ "loss": 1.2781,
+ "step": 667
+ },
+ {
+ "epoch": 0.6681879278547092,
+ "grad_norm": 0.5124595761299133,
+ "learning_rate": 0.00017646951617975837,
+ "loss": 1.318,
+ "step": 668
+ },
+ {
+ "epoch": 0.6691882091838329,
+ "grad_norm": 0.5968078970909119,
+ "learning_rate": 0.00017640192603734692,
+ "loss": 1.1483,
+ "step": 669
+ },
+ {
+ "epoch": 0.6701884905129568,
+ "grad_norm": 0.6211404204368591,
+ "learning_rate": 0.00017633425194303606,
+ "loss": 1.1164,
+ "step": 670
+ },
+ {
+ "epoch": 0.6711887718420806,
+ "grad_norm": 0.5539883375167847,
+ "learning_rate": 0.00017626649397118734,
+ "loss": 1.453,
+ "step": 671
+ },
+ {
+ "epoch": 0.6721890531712044,
+ "grad_norm": 0.5188294649124146,
+ "learning_rate": 0.00017619865219625452,
+ "loss": 1.5201,
+ "step": 672
+ },
+ {
+ "epoch": 0.6731893345003283,
+ "grad_norm": 0.531973659992218,
+ "learning_rate": 0.00017613072669278343,
+ "loss": 1.3176,
+ "step": 673
+ },
+ {
+ "epoch": 0.674189615829452,
+ "grad_norm": 0.5878707766532898,
+ "learning_rate": 0.00017606271753541192,
+ "loss": 1.5326,
+ "step": 674
+ },
+ {
+ "epoch": 0.6751898971585758,
+ "grad_norm": 0.595443844795227,
+ "learning_rate": 0.00017599462479886974,
+ "loss": 1.4033,
+ "step": 675
+ },
+ {
+ "epoch": 0.6761901784876997,
+ "grad_norm": 0.5093846321105957,
+ "learning_rate": 0.00017592644855797854,
+ "loss": 1.2995,
+ "step": 676
+ },
+ {
+ "epoch": 0.6771904598168235,
+ "grad_norm": 0.5521978735923767,
+ "learning_rate": 0.00017585818888765168,
+ "loss": 1.2912,
+ "step": 677
+ },
+ {
+ "epoch": 0.6781907411459474,
+ "grad_norm": 0.4612530469894409,
+ "learning_rate": 0.0001757898458628941,
+ "loss": 1.1902,
+ "step": 678
+ },
+ {
+ "epoch": 0.6791910224750711,
+ "grad_norm": 0.4973600506782532,
+ "learning_rate": 0.00017572141955880252,
+ "loss": 1.3547,
+ "step": 679
+ },
+ {
+ "epoch": 0.6801913038041949,
+ "grad_norm": 0.606407105922699,
+ "learning_rate": 0.00017565291005056504,
+ "loss": 1.371,
+ "step": 680
+ },
+ {
+ "epoch": 0.6811915851333188,
+ "grad_norm": 0.5027814507484436,
+ "learning_rate": 0.00017558431741346122,
+ "loss": 1.4551,
+ "step": 681
+ },
+ {
+ "epoch": 0.6821918664624426,
+ "grad_norm": 0.5732039213180542,
+ "learning_rate": 0.00017551564172286197,
+ "loss": 1.4181,
+ "step": 682
+ },
+ {
+ "epoch": 0.6831921477915663,
+ "grad_norm": 0.6327995657920837,
+ "learning_rate": 0.00017544688305422943,
+ "loss": 1.237,
+ "step": 683
+ },
+ {
+ "epoch": 0.6841924291206902,
+ "grad_norm": 0.5779625177383423,
+ "learning_rate": 0.00017537804148311695,
+ "loss": 1.5356,
+ "step": 684
+ },
+ {
+ "epoch": 0.685192710449814,
+ "grad_norm": 0.6031951308250427,
+ "learning_rate": 0.00017530911708516902,
+ "loss": 1.3776,
+ "step": 685
+ },
+ {
+ "epoch": 0.6861929917789378,
+ "grad_norm": 0.4811258018016815,
+ "learning_rate": 0.00017524010993612098,
+ "loss": 1.185,
+ "step": 686
+ },
+ {
+ "epoch": 0.6871932731080617,
+ "grad_norm": 0.5048002600669861,
+ "learning_rate": 0.00017517102011179933,
+ "loss": 1.3335,
+ "step": 687
+ },
+ {
+ "epoch": 0.6881935544371854,
+ "grad_norm": 0.5963343977928162,
+ "learning_rate": 0.0001751018476881212,
+ "loss": 1.4326,
+ "step": 688
+ },
+ {
+ "epoch": 0.6891938357663093,
+ "grad_norm": 0.4770168960094452,
+ "learning_rate": 0.00017503259274109464,
+ "loss": 1.4664,
+ "step": 689
+ },
+ {
+ "epoch": 0.6901941170954331,
+ "grad_norm": 0.5020537376403809,
+ "learning_rate": 0.00017496325534681825,
+ "loss": 1.349,
+ "step": 690
+ },
+ {
+ "epoch": 0.6911943984245569,
+ "grad_norm": 0.5567785501480103,
+ "learning_rate": 0.00017489383558148136,
+ "loss": 1.452,
+ "step": 691
+ },
+ {
+ "epoch": 0.6921946797536808,
+ "grad_norm": 0.5167350769042969,
+ "learning_rate": 0.00017482433352136365,
+ "loss": 1.1148,
+ "step": 692
+ },
+ {
+ "epoch": 0.6931949610828045,
+ "grad_norm": 0.6030716300010681,
+ "learning_rate": 0.00017475474924283536,
+ "loss": 1.3473,
+ "step": 693
+ },
+ {
+ "epoch": 0.6941952424119283,
+ "grad_norm": 0.5643062591552734,
+ "learning_rate": 0.00017468508282235704,
+ "loss": 1.3476,
+ "step": 694
+ },
+ {
+ "epoch": 0.6951955237410522,
+ "grad_norm": 0.5124102234840393,
+ "learning_rate": 0.00017461533433647946,
+ "loss": 1.339,
+ "step": 695
+ },
+ {
+ "epoch": 0.696195805070176,
+ "grad_norm": 0.5690215229988098,
+ "learning_rate": 0.00017454550386184362,
+ "loss": 1.3816,
+ "step": 696
+ },
+ {
+ "epoch": 0.6971960863992998,
+ "grad_norm": 0.5938367247581482,
+ "learning_rate": 0.00017447559147518055,
+ "loss": 1.4554,
+ "step": 697
+ },
+ {
+ "epoch": 0.6981963677284236,
+ "grad_norm": 0.5288996696472168,
+ "learning_rate": 0.00017440559725331135,
+ "loss": 1.2904,
+ "step": 698
+ },
+ {
+ "epoch": 0.6991966490575474,
+ "grad_norm": 0.5047140121459961,
+ "learning_rate": 0.000174335521273147,
+ "loss": 1.2362,
+ "step": 699
+ },
+ {
+ "epoch": 0.7001969303866713,
+ "grad_norm": 0.5563321709632874,
+ "learning_rate": 0.00017426536361168834,
+ "loss": 1.2863,
+ "step": 700
+ },
+ {
+ "epoch": 0.7011972117157951,
+ "grad_norm": 0.48857688903808594,
+ "learning_rate": 0.00017419512434602594,
+ "loss": 1.3387,
+ "step": 701
+ },
+ {
+ "epoch": 0.7021974930449189,
+ "grad_norm": 0.5205016732215881,
+ "learning_rate": 0.00017412480355334005,
+ "loss": 1.3874,
+ "step": 702
+ },
+ {
+ "epoch": 0.7031977743740427,
+ "grad_norm": 0.5850381851196289,
+ "learning_rate": 0.00017405440131090048,
+ "loss": 1.5369,
+ "step": 703
+ },
+ {
+ "epoch": 0.7041980557031665,
+ "grad_norm": 0.5708681344985962,
+ "learning_rate": 0.00017398391769606658,
+ "loss": 1.3622,
+ "step": 704
+ },
+ {
+ "epoch": 0.7051983370322903,
+ "grad_norm": 0.5743641257286072,
+ "learning_rate": 0.00017391335278628712,
+ "loss": 1.2946,
+ "step": 705
+ },
+ {
+ "epoch": 0.7061986183614142,
+ "grad_norm": 0.5376024842262268,
+ "learning_rate": 0.00017384270665910014,
+ "loss": 1.2952,
+ "step": 706
+ },
+ {
+ "epoch": 0.707198899690538,
+ "grad_norm": 0.6123641133308411,
+ "learning_rate": 0.000173771979392133,
+ "loss": 1.4239,
+ "step": 707
+ },
+ {
+ "epoch": 0.7081991810196617,
+ "grad_norm": 0.5639240741729736,
+ "learning_rate": 0.00017370117106310214,
+ "loss": 1.3627,
+ "step": 708
+ },
+ {
+ "epoch": 0.7091994623487856,
+ "grad_norm": 0.5551653504371643,
+ "learning_rate": 0.0001736302817498131,
+ "loss": 1.3435,
+ "step": 709
+ },
+ {
+ "epoch": 0.7101997436779094,
+ "grad_norm": 0.4746958911418915,
+ "learning_rate": 0.00017355931153016044,
+ "loss": 1.2402,
+ "step": 710
+ },
+ {
+ "epoch": 0.7112000250070333,
+ "grad_norm": 0.4722553491592407,
+ "learning_rate": 0.0001734882604821276,
+ "loss": 1.3962,
+ "step": 711
+ },
+ {
+ "epoch": 0.712200306336157,
+ "grad_norm": 0.5038101077079773,
+ "learning_rate": 0.0001734171286837868,
+ "loss": 1.3261,
+ "step": 712
+ },
+ {
+ "epoch": 0.7132005876652808,
+ "grad_norm": 0.5004639625549316,
+ "learning_rate": 0.00017334591621329906,
+ "loss": 1.4943,
+ "step": 713
+ },
+ {
+ "epoch": 0.7142008689944047,
+ "grad_norm": 0.5141516327857971,
+ "learning_rate": 0.00017327462314891402,
+ "loss": 1.2754,
+ "step": 714
+ },
+ {
+ "epoch": 0.7152011503235285,
+ "grad_norm": 0.5491873025894165,
+ "learning_rate": 0.00017320324956896977,
+ "loss": 1.3052,
+ "step": 715
+ },
+ {
+ "epoch": 0.7162014316526523,
+ "grad_norm": 0.49937358498573303,
+ "learning_rate": 0.00017313179555189306,
+ "loss": 1.2277,
+ "step": 716
+ },
+ {
+ "epoch": 0.7172017129817762,
+ "grad_norm": 0.6419594287872314,
+ "learning_rate": 0.00017306026117619889,
+ "loss": 1.4844,
+ "step": 717
+ },
+ {
+ "epoch": 0.7182019943108999,
+ "grad_norm": 0.521108090877533,
+ "learning_rate": 0.0001729886465204906,
+ "loss": 1.2917,
+ "step": 718
+ },
+ {
+ "epoch": 0.7192022756400237,
+ "grad_norm": 0.532421886920929,
+ "learning_rate": 0.0001729169516634598,
+ "loss": 1.4555,
+ "step": 719
+ },
+ {
+ "epoch": 0.7202025569691476,
+ "grad_norm": 0.5168073177337646,
+ "learning_rate": 0.0001728451766838861,
+ "loss": 1.2116,
+ "step": 720
+ },
+ {
+ "epoch": 0.7212028382982714,
+ "grad_norm": 0.5593972206115723,
+ "learning_rate": 0.00017277332166063726,
+ "loss": 1.4345,
+ "step": 721
+ },
+ {
+ "epoch": 0.7222031196273953,
+ "grad_norm": 0.5317432284355164,
+ "learning_rate": 0.00017270138667266894,
+ "loss": 1.2987,
+ "step": 722
+ },
+ {
+ "epoch": 0.723203400956519,
+ "grad_norm": 0.6262248158454895,
+ "learning_rate": 0.00017262937179902472,
+ "loss": 1.2591,
+ "step": 723
+ },
+ {
+ "epoch": 0.7242036822856428,
+ "grad_norm": 0.5377100110054016,
+ "learning_rate": 0.00017255727711883588,
+ "loss": 1.366,
+ "step": 724
+ },
+ {
+ "epoch": 0.7252039636147667,
+ "grad_norm": 0.5637168288230896,
+ "learning_rate": 0.00017248510271132144,
+ "loss": 1.4593,
+ "step": 725
+ },
+ {
+ "epoch": 0.7262042449438905,
+ "grad_norm": 0.5360320210456848,
+ "learning_rate": 0.00017241284865578802,
+ "loss": 1.4797,
+ "step": 726
+ },
+ {
+ "epoch": 0.7272045262730142,
+ "grad_norm": 0.48500168323516846,
+ "learning_rate": 0.00017234051503162978,
+ "loss": 1.3875,
+ "step": 727
+ },
+ {
+ "epoch": 0.7282048076021381,
+ "grad_norm": 0.5666176080703735,
+ "learning_rate": 0.0001722681019183283,
+ "loss": 1.4683,
+ "step": 728
+ },
+ {
+ "epoch": 0.7292050889312619,
+ "grad_norm": 0.5710940361022949,
+ "learning_rate": 0.00017219560939545246,
+ "loss": 1.5538,
+ "step": 729
+ },
+ {
+ "epoch": 0.7302053702603858,
+ "grad_norm": 0.5658044219017029,
+ "learning_rate": 0.00017212303754265843,
+ "loss": 1.248,
+ "step": 730
+ },
+ {
+ "epoch": 0.7312056515895096,
+ "grad_norm": 0.5355331301689148,
+ "learning_rate": 0.0001720503864396896,
+ "loss": 1.259,
+ "step": 731
+ },
+ {
+ "epoch": 0.7322059329186333,
+ "grad_norm": 0.5683363676071167,
+ "learning_rate": 0.00017197765616637636,
+ "loss": 1.4242,
+ "step": 732
+ },
+ {
+ "epoch": 0.7332062142477572,
+ "grad_norm": 0.488972932100296,
+ "learning_rate": 0.0001719048468026361,
+ "loss": 1.3442,
+ "step": 733
+ },
+ {
+ "epoch": 0.734206495576881,
+ "grad_norm": 0.45563748478889465,
+ "learning_rate": 0.00017183195842847322,
+ "loss": 1.3236,
+ "step": 734
+ },
+ {
+ "epoch": 0.7352067769060048,
+ "grad_norm": 0.5114185214042664,
+ "learning_rate": 0.0001717589911239788,
+ "loss": 1.3071,
+ "step": 735
+ },
+ {
+ "epoch": 0.7362070582351287,
+ "grad_norm": 0.558686375617981,
+ "learning_rate": 0.00017168594496933074,
+ "loss": 1.2889,
+ "step": 736
+ },
+ {
+ "epoch": 0.7372073395642524,
+ "grad_norm": 0.49099281430244446,
+ "learning_rate": 0.00017161282004479351,
+ "loss": 1.1701,
+ "step": 737
+ },
+ {
+ "epoch": 0.7382076208933762,
+ "grad_norm": 0.549524188041687,
+ "learning_rate": 0.0001715396164307182,
+ "loss": 1.2853,
+ "step": 738
+ },
+ {
+ "epoch": 0.7392079022225001,
+ "grad_norm": 0.5683863162994385,
+ "learning_rate": 0.0001714663342075424,
+ "loss": 1.4201,
+ "step": 739
+ },
+ {
+ "epoch": 0.7402081835516239,
+ "grad_norm": 0.5957104563713074,
+ "learning_rate": 0.00017139297345578994,
+ "loss": 1.3406,
+ "step": 740
+ },
+ {
+ "epoch": 0.7412084648807478,
+ "grad_norm": 0.4645147919654846,
+ "learning_rate": 0.00017131953425607104,
+ "loss": 1.2344,
+ "step": 741
+ },
+ {
+ "epoch": 0.7422087462098715,
+ "grad_norm": 0.4981783330440521,
+ "learning_rate": 0.00017124601668908212,
+ "loss": 1.422,
+ "step": 742
+ },
+ {
+ "epoch": 0.7432090275389953,
+ "grad_norm": 0.5426530838012695,
+ "learning_rate": 0.00017117242083560568,
+ "loss": 1.4275,
+ "step": 743
+ },
+ {
+ "epoch": 0.7442093088681192,
+ "grad_norm": 0.5585354566574097,
+ "learning_rate": 0.00017109874677651024,
+ "loss": 1.5049,
+ "step": 744
+ },
+ {
+ "epoch": 0.745209590197243,
+ "grad_norm": 0.5639151930809021,
+ "learning_rate": 0.0001710249945927503,
+ "loss": 1.4019,
+ "step": 745
+ },
+ {
+ "epoch": 0.7462098715263668,
+ "grad_norm": 0.8334717750549316,
+ "learning_rate": 0.00017095116436536612,
+ "loss": 1.5607,
+ "step": 746
+ },
+ {
+ "epoch": 0.7472101528554906,
+ "grad_norm": 0.513970673084259,
+ "learning_rate": 0.00017087725617548385,
+ "loss": 1.1967,
+ "step": 747
+ },
+ {
+ "epoch": 0.7482104341846144,
+ "grad_norm": 0.6200702786445618,
+ "learning_rate": 0.00017080327010431513,
+ "loss": 1.2298,
+ "step": 748
+ },
+ {
+ "epoch": 0.7492107155137382,
+ "grad_norm": 0.54522305727005,
+ "learning_rate": 0.00017072920623315734,
+ "loss": 1.3214,
+ "step": 749
+ },
+ {
+ "epoch": 0.7502109968428621,
+ "grad_norm": 0.6682360172271729,
+ "learning_rate": 0.00017065506464339326,
+ "loss": 1.4631,
+ "step": 750
+ },
+ {
+ "epoch": 0.7512112781719859,
+ "grad_norm": 0.5061535239219666,
+ "learning_rate": 0.00017058084541649106,
+ "loss": 1.5062,
+ "step": 751
+ },
+ {
+ "epoch": 0.7522115595011097,
+ "grad_norm": 0.5790627598762512,
+ "learning_rate": 0.00017050654863400429,
+ "loss": 1.1371,
+ "step": 752
+ },
+ {
+ "epoch": 0.7532118408302335,
+ "grad_norm": 0.6058077216148376,
+ "learning_rate": 0.00017043217437757164,
+ "loss": 1.2185,
+ "step": 753
+ },
+ {
+ "epoch": 0.7542121221593573,
+ "grad_norm": 0.5494515895843506,
+ "learning_rate": 0.00017035772272891702,
+ "loss": 1.2468,
+ "step": 754
+ },
+ {
+ "epoch": 0.7552124034884812,
+ "grad_norm": 0.5687912106513977,
+ "learning_rate": 0.00017028319376984928,
+ "loss": 1.5621,
+ "step": 755
+ },
+ {
+ "epoch": 0.756212684817605,
+ "grad_norm": 0.5341185927391052,
+ "learning_rate": 0.00017020858758226229,
+ "loss": 1.3598,
+ "step": 756
+ },
+ {
+ "epoch": 0.7572129661467287,
+ "grad_norm": 0.5373026132583618,
+ "learning_rate": 0.0001701339042481347,
+ "loss": 1.4185,
+ "step": 757
+ },
+ {
+ "epoch": 0.7582132474758526,
+ "grad_norm": 0.46508973836898804,
+ "learning_rate": 0.00017005914384953007,
+ "loss": 1.2962,
+ "step": 758
+ },
+ {
+ "epoch": 0.7592135288049764,
+ "grad_norm": 0.4580937325954437,
+ "learning_rate": 0.00016998430646859654,
+ "loss": 1.0707,
+ "step": 759
+ },
+ {
+ "epoch": 0.7602138101341002,
+ "grad_norm": 0.5277093052864075,
+ "learning_rate": 0.00016990939218756683,
+ "loss": 1.2529,
+ "step": 760
+ },
+ {
+ "epoch": 0.761214091463224,
+ "grad_norm": 0.5356671214103699,
+ "learning_rate": 0.0001698344010887582,
+ "loss": 1.4032,
+ "step": 761
+ },
+ {
+ "epoch": 0.7622143727923478,
+ "grad_norm": 0.6881769299507141,
+ "learning_rate": 0.0001697593332545723,
+ "loss": 1.4885,
+ "step": 762
+ },
+ {
+ "epoch": 0.7632146541214717,
+ "grad_norm": 0.5370383262634277,
+ "learning_rate": 0.0001696841887674951,
+ "loss": 1.3271,
+ "step": 763
+ },
+ {
+ "epoch": 0.7642149354505955,
+ "grad_norm": 0.4792316257953644,
+ "learning_rate": 0.00016960896771009684,
+ "loss": 1.2274,
+ "step": 764
+ },
+ {
+ "epoch": 0.7652152167797193,
+ "grad_norm": 0.5276592373847961,
+ "learning_rate": 0.00016953367016503182,
+ "loss": 1.2399,
+ "step": 765
+ },
+ {
+ "epoch": 0.7662154981088432,
+ "grad_norm": 0.4789050221443176,
+ "learning_rate": 0.00016945829621503838,
+ "loss": 1.4002,
+ "step": 766
+ },
+ {
+ "epoch": 0.7672157794379669,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 0.00016938284594293897,
+ "loss": 1.3897,
+ "step": 767
+ },
+ {
+ "epoch": 0.7682160607670907,
+ "grad_norm": 0.5009675621986389,
+ "learning_rate": 0.00016930731943163972,
+ "loss": 1.3797,
+ "step": 768
+ },
+ {
+ "epoch": 0.7692163420962146,
+ "grad_norm": 0.4863432049751282,
+ "learning_rate": 0.00016923171676413063,
+ "loss": 1.4251,
+ "step": 769
+ },
+ {
+ "epoch": 0.7702166234253384,
+ "grad_norm": 0.5190616846084595,
+ "learning_rate": 0.00016915603802348535,
+ "loss": 1.4265,
+ "step": 770
+ },
+ {
+ "epoch": 0.7712169047544621,
+ "grad_norm": 0.5603469610214233,
+ "learning_rate": 0.00016908028329286112,
+ "loss": 1.2852,
+ "step": 771
+ },
+ {
+ "epoch": 0.772217186083586,
+ "grad_norm": 0.5128753185272217,
+ "learning_rate": 0.0001690044526554987,
+ "loss": 1.3324,
+ "step": 772
+ },
+ {
+ "epoch": 0.7732174674127098,
+ "grad_norm": 0.4992072284221649,
+ "learning_rate": 0.00016892854619472223,
+ "loss": 1.2498,
+ "step": 773
+ },
+ {
+ "epoch": 0.7742177487418337,
+ "grad_norm": 0.6128407716751099,
+ "learning_rate": 0.00016885256399393924,
+ "loss": 1.2967,
+ "step": 774
+ },
+ {
+ "epoch": 0.7752180300709575,
+ "grad_norm": 0.5186858177185059,
+ "learning_rate": 0.00016877650613664034,
+ "loss": 1.2654,
+ "step": 775
+ },
+ {
+ "epoch": 0.7762183114000812,
+ "grad_norm": 0.5207421183586121,
+ "learning_rate": 0.00016870037270639942,
+ "loss": 1.2994,
+ "step": 776
+ },
+ {
+ "epoch": 0.7772185927292051,
+ "grad_norm": 0.509912371635437,
+ "learning_rate": 0.0001686241637868734,
+ "loss": 1.3971,
+ "step": 777
+ },
+ {
+ "epoch": 0.7782188740583289,
+ "grad_norm": 0.47703370451927185,
+ "learning_rate": 0.00016854787946180198,
+ "loss": 1.282,
+ "step": 778
+ },
+ {
+ "epoch": 0.7792191553874527,
+ "grad_norm": 0.5404442548751831,
+ "learning_rate": 0.00016847151981500789,
+ "loss": 1.1986,
+ "step": 779
+ },
+ {
+ "epoch": 0.7802194367165766,
+ "grad_norm": 0.541050136089325,
+ "learning_rate": 0.00016839508493039657,
+ "loss": 1.4478,
+ "step": 780
+ },
+ {
+ "epoch": 0.7812197180457003,
+ "grad_norm": 0.46520569920539856,
+ "learning_rate": 0.00016831857489195618,
+ "loss": 1.2385,
+ "step": 781
+ },
+ {
+ "epoch": 0.7822199993748241,
+ "grad_norm": 0.5150445699691772,
+ "learning_rate": 0.00016824198978375736,
+ "loss": 1.3695,
+ "step": 782
+ },
+ {
+ "epoch": 0.783220280703948,
+ "grad_norm": 0.5754334926605225,
+ "learning_rate": 0.00016816532968995328,
+ "loss": 1.3026,
+ "step": 783
+ },
+ {
+ "epoch": 0.7842205620330718,
+ "grad_norm": 0.5335776209831238,
+ "learning_rate": 0.0001680885946947796,
+ "loss": 1.3391,
+ "step": 784
+ },
+ {
+ "epoch": 0.7852208433621957,
+ "grad_norm": 0.6596659421920776,
+ "learning_rate": 0.00016801178488255413,
+ "loss": 1.3224,
+ "step": 785
+ },
+ {
+ "epoch": 0.7862211246913194,
+ "grad_norm": 0.5251991748809814,
+ "learning_rate": 0.00016793490033767698,
+ "loss": 1.1744,
+ "step": 786
+ },
+ {
+ "epoch": 0.7872214060204432,
+ "grad_norm": 0.5112204551696777,
+ "learning_rate": 0.00016785794114463037,
+ "loss": 1.2455,
+ "step": 787
+ },
+ {
+ "epoch": 0.7882216873495671,
+ "grad_norm": 0.532893717288971,
+ "learning_rate": 0.00016778090738797853,
+ "loss": 1.2437,
+ "step": 788
+ },
+ {
+ "epoch": 0.7892219686786909,
+ "grad_norm": 0.5534240007400513,
+ "learning_rate": 0.00016770379915236766,
+ "loss": 1.396,
+ "step": 789
+ },
+ {
+ "epoch": 0.7902222500078147,
+ "grad_norm": 0.5164292454719543,
+ "learning_rate": 0.00016762661652252567,
+ "loss": 1.3138,
+ "step": 790
+ },
+ {
+ "epoch": 0.7912225313369385,
+ "grad_norm": 0.5660764575004578,
+ "learning_rate": 0.00016754935958326244,
+ "loss": 1.3014,
+ "step": 791
+ },
+ {
+ "epoch": 0.7922228126660623,
+ "grad_norm": 0.5137651562690735,
+ "learning_rate": 0.00016747202841946928,
+ "loss": 1.2834,
+ "step": 792
+ },
+ {
+ "epoch": 0.7932230939951862,
+ "grad_norm": 0.5546874403953552,
+ "learning_rate": 0.00016739462311611919,
+ "loss": 1.2841,
+ "step": 793
+ },
+ {
+ "epoch": 0.79422337532431,
+ "grad_norm": 0.5112007260322571,
+ "learning_rate": 0.00016731714375826657,
+ "loss": 1.1873,
+ "step": 794
+ },
+ {
+ "epoch": 0.7952236566534338,
+ "grad_norm": 0.5462679862976074,
+ "learning_rate": 0.00016723959043104728,
+ "loss": 1.2602,
+ "step": 795
+ },
+ {
+ "epoch": 0.7962239379825576,
+ "grad_norm": 0.5083702802658081,
+ "learning_rate": 0.00016716196321967832,
+ "loss": 1.334,
+ "step": 796
+ },
+ {
+ "epoch": 0.7972242193116814,
+ "grad_norm": 0.5491913557052612,
+ "learning_rate": 0.00016708426220945802,
+ "loss": 1.335,
+ "step": 797
+ },
+ {
+ "epoch": 0.7982245006408052,
+ "grad_norm": 0.5257419943809509,
+ "learning_rate": 0.00016700648748576574,
+ "loss": 1.374,
+ "step": 798
+ },
+ {
+ "epoch": 0.7992247819699291,
+ "grad_norm": 0.5252013206481934,
+ "learning_rate": 0.0001669286391340618,
+ "loss": 1.281,
+ "step": 799
+ },
+ {
+ "epoch": 0.8002250632990529,
+ "grad_norm": 0.5784058570861816,
+ "learning_rate": 0.00016685071723988748,
+ "loss": 1.385,
+ "step": 800
+ },
+ {
+ "epoch": 0.8012253446281766,
+ "grad_norm": 0.5508819818496704,
+ "learning_rate": 0.00016677272188886483,
+ "loss": 1.5138,
+ "step": 801
+ },
+ {
+ "epoch": 0.8022256259573005,
+ "grad_norm": 0.5943104028701782,
+ "learning_rate": 0.00016669465316669667,
+ "loss": 1.2341,
+ "step": 802
+ },
+ {
+ "epoch": 0.8032259072864243,
+ "grad_norm": 0.5109750032424927,
+ "learning_rate": 0.00016661651115916642,
+ "loss": 1.361,
+ "step": 803
+ },
+ {
+ "epoch": 0.8042261886155482,
+ "grad_norm": 0.5322972536087036,
+ "learning_rate": 0.00016653829595213794,
+ "loss": 1.3383,
+ "step": 804
+ },
+ {
+ "epoch": 0.805226469944672,
+ "grad_norm": 0.4870489537715912,
+ "learning_rate": 0.00016646000763155568,
+ "loss": 1.2932,
+ "step": 805
+ },
+ {
+ "epoch": 0.8062267512737957,
+ "grad_norm": 0.6070749163627625,
+ "learning_rate": 0.00016638164628344425,
+ "loss": 1.3517,
+ "step": 806
+ },
+ {
+ "epoch": 0.8072270326029196,
+ "grad_norm": 0.5695485472679138,
+ "learning_rate": 0.00016630321199390867,
+ "loss": 1.295,
+ "step": 807
+ },
+ {
+ "epoch": 0.8082273139320434,
+ "grad_norm": 0.49092933535575867,
+ "learning_rate": 0.00016622470484913406,
+ "loss": 1.1708,
+ "step": 808
+ },
+ {
+ "epoch": 0.8092275952611672,
+ "grad_norm": 0.5488709807395935,
+ "learning_rate": 0.00016614612493538551,
+ "loss": 1.3101,
+ "step": 809
+ },
+ {
+ "epoch": 0.810227876590291,
+ "grad_norm": 0.6875150799751282,
+ "learning_rate": 0.00016606747233900815,
+ "loss": 1.3,
+ "step": 810
+ },
+ {
+ "epoch": 0.8112281579194148,
+ "grad_norm": 0.5599775910377502,
+ "learning_rate": 0.00016598874714642697,
+ "loss": 1.5711,
+ "step": 811
+ },
+ {
+ "epoch": 0.8122284392485386,
+ "grad_norm": 0.7102994322776794,
+ "learning_rate": 0.00016590994944414678,
+ "loss": 1.4553,
+ "step": 812
+ },
+ {
+ "epoch": 0.8132287205776625,
+ "grad_norm": 0.5191233158111572,
+ "learning_rate": 0.00016583107931875192,
+ "loss": 1.4292,
+ "step": 813
+ },
+ {
+ "epoch": 0.8142290019067863,
+ "grad_norm": 0.4739600718021393,
+ "learning_rate": 0.0001657521368569064,
+ "loss": 1.3776,
+ "step": 814
+ },
+ {
+ "epoch": 0.8152292832359102,
+ "grad_norm": 0.5282078981399536,
+ "learning_rate": 0.0001656731221453537,
+ "loss": 1.4359,
+ "step": 815
+ },
+ {
+ "epoch": 0.8162295645650339,
+ "grad_norm": 0.690367579460144,
+ "learning_rate": 0.00016559403527091675,
+ "loss": 1.1747,
+ "step": 816
+ },
+ {
+ "epoch": 0.8172298458941577,
+ "grad_norm": 0.5715120434761047,
+ "learning_rate": 0.0001655148763204977,
+ "loss": 1.3289,
+ "step": 817
+ },
+ {
+ "epoch": 0.8182301272232816,
+ "grad_norm": 0.7024423480033875,
+ "learning_rate": 0.00016543564538107797,
+ "loss": 1.4758,
+ "step": 818
+ },
+ {
+ "epoch": 0.8192304085524054,
+ "grad_norm": 0.5568886399269104,
+ "learning_rate": 0.00016535634253971794,
+ "loss": 1.5172,
+ "step": 819
+ },
+ {
+ "epoch": 0.8202306898815291,
+ "grad_norm": 0.5847441554069519,
+ "learning_rate": 0.00016527696788355714,
+ "loss": 1.1993,
+ "step": 820
+ },
+ {
+ "epoch": 0.821230971210653,
+ "grad_norm": 0.5402149558067322,
+ "learning_rate": 0.00016519752149981397,
+ "loss": 1.2921,
+ "step": 821
+ },
+ {
+ "epoch": 0.8222312525397768,
+ "grad_norm": 0.6050311326980591,
+ "learning_rate": 0.0001651180034757856,
+ "loss": 1.59,
+ "step": 822
+ },
+ {
+ "epoch": 0.8232315338689006,
+ "grad_norm": 0.6215486526489258,
+ "learning_rate": 0.00016503841389884798,
+ "loss": 1.4562,
+ "step": 823
+ },
+ {
+ "epoch": 0.8242318151980245,
+ "grad_norm": 0.6507789492607117,
+ "learning_rate": 0.00016495875285645566,
+ "loss": 1.349,
+ "step": 824
+ },
+ {
+ "epoch": 0.8252320965271482,
+ "grad_norm": 0.5273147225379944,
+ "learning_rate": 0.00016487902043614173,
+ "loss": 1.4016,
+ "step": 825
+ },
+ {
+ "epoch": 0.8262323778562721,
+ "grad_norm": 0.579987645149231,
+ "learning_rate": 0.0001647992167255177,
+ "loss": 1.4077,
+ "step": 826
+ },
+ {
+ "epoch": 0.8272326591853959,
+ "grad_norm": 0.5068405270576477,
+ "learning_rate": 0.0001647193418122734,
+ "loss": 1.5075,
+ "step": 827
+ },
+ {
+ "epoch": 0.8282329405145197,
+ "grad_norm": 0.519982099533081,
+ "learning_rate": 0.00016463939578417692,
+ "loss": 1.2721,
+ "step": 828
+ },
+ {
+ "epoch": 0.8292332218436436,
+ "grad_norm": 0.5181561708450317,
+ "learning_rate": 0.0001645593787290745,
+ "loss": 1.2299,
+ "step": 829
+ },
+ {
+ "epoch": 0.8302335031727673,
+ "grad_norm": 0.47413337230682373,
+ "learning_rate": 0.0001644792907348904,
+ "loss": 1.2462,
+ "step": 830
+ },
+ {
+ "epoch": 0.8312337845018911,
+ "grad_norm": 0.5426570773124695,
+ "learning_rate": 0.00016439913188962685,
+ "loss": 1.4496,
+ "step": 831
+ },
+ {
+ "epoch": 0.832234065831015,
+ "grad_norm": 0.5744379758834839,
+ "learning_rate": 0.0001643189022813639,
+ "loss": 1.3284,
+ "step": 832
+ },
+ {
+ "epoch": 0.8332343471601388,
+ "grad_norm": 0.49693235754966736,
+ "learning_rate": 0.0001642386019982594,
+ "loss": 1.4082,
+ "step": 833
+ },
+ {
+ "epoch": 0.8342346284892626,
+ "grad_norm": 0.5346773862838745,
+ "learning_rate": 0.00016415823112854883,
+ "loss": 1.4238,
+ "step": 834
+ },
+ {
+ "epoch": 0.8352349098183864,
+ "grad_norm": 0.6201802492141724,
+ "learning_rate": 0.00016407778976054526,
+ "loss": 1.3288,
+ "step": 835
+ },
+ {
+ "epoch": 0.8362351911475102,
+ "grad_norm": 0.5161807537078857,
+ "learning_rate": 0.0001639972779826392,
+ "loss": 1.3798,
+ "step": 836
+ },
+ {
+ "epoch": 0.8372354724766341,
+ "grad_norm": 0.4670160412788391,
+ "learning_rate": 0.0001639166958832985,
+ "loss": 1.3765,
+ "step": 837
+ },
+ {
+ "epoch": 0.8382357538057579,
+ "grad_norm": 0.6492543816566467,
+ "learning_rate": 0.00016383604355106837,
+ "loss": 1.5277,
+ "step": 838
+ },
+ {
+ "epoch": 0.8392360351348817,
+ "grad_norm": 0.5766328573226929,
+ "learning_rate": 0.00016375532107457108,
+ "loss": 1.2481,
+ "step": 839
+ },
+ {
+ "epoch": 0.8402363164640055,
+ "grad_norm": 0.6431072950363159,
+ "learning_rate": 0.00016367452854250603,
+ "loss": 1.2755,
+ "step": 840
+ },
+ {
+ "epoch": 0.8412365977931293,
+ "grad_norm": 0.5121828317642212,
+ "learning_rate": 0.00016359366604364972,
+ "loss": 1.2927,
+ "step": 841
+ },
+ {
+ "epoch": 0.8422368791222531,
+ "grad_norm": 0.5222392678260803,
+ "learning_rate": 0.00016351273366685526,
+ "loss": 1.2626,
+ "step": 842
+ },
+ {
+ "epoch": 0.843237160451377,
+ "grad_norm": 0.5536903142929077,
+ "learning_rate": 0.00016343173150105278,
+ "loss": 1.1892,
+ "step": 843
+ },
+ {
+ "epoch": 0.8442374417805008,
+ "grad_norm": 0.5569381713867188,
+ "learning_rate": 0.00016335065963524897,
+ "loss": 1.4263,
+ "step": 844
+ },
+ {
+ "epoch": 0.8452377231096245,
+ "grad_norm": 0.6490715742111206,
+ "learning_rate": 0.0001632695181585272,
+ "loss": 1.452,
+ "step": 845
+ },
+ {
+ "epoch": 0.8462380044387484,
+ "grad_norm": 0.5965350270271301,
+ "learning_rate": 0.00016318830716004722,
+ "loss": 1.4189,
+ "step": 846
+ },
+ {
+ "epoch": 0.8472382857678722,
+ "grad_norm": 0.45904603600502014,
+ "learning_rate": 0.00016310702672904528,
+ "loss": 1.4024,
+ "step": 847
+ },
+ {
+ "epoch": 0.8482385670969961,
+ "grad_norm": 0.4320334494113922,
+ "learning_rate": 0.00016302567695483382,
+ "loss": 1.2105,
+ "step": 848
+ },
+ {
+ "epoch": 0.8492388484261199,
+ "grad_norm": 0.527032196521759,
+ "learning_rate": 0.0001629442579268016,
+ "loss": 1.1996,
+ "step": 849
+ },
+ {
+ "epoch": 0.8502391297552436,
+ "grad_norm": 0.6317036747932434,
+ "learning_rate": 0.00016286276973441333,
+ "loss": 1.4811,
+ "step": 850
+ },
+ {
+ "epoch": 0.8512394110843675,
+ "grad_norm": 0.5726277828216553,
+ "learning_rate": 0.00016278121246720987,
+ "loss": 1.3249,
+ "step": 851
+ },
+ {
+ "epoch": 0.8522396924134913,
+ "grad_norm": 0.4624577462673187,
+ "learning_rate": 0.00016269958621480788,
+ "loss": 1.3291,
+ "step": 852
+ },
+ {
+ "epoch": 0.8532399737426151,
+ "grad_norm": 0.5774461627006531,
+ "learning_rate": 0.0001626178910668998,
+ "loss": 1.2891,
+ "step": 853
+ },
+ {
+ "epoch": 0.854240255071739,
+ "grad_norm": 0.503584086894989,
+ "learning_rate": 0.00016253612711325386,
+ "loss": 1.3048,
+ "step": 854
+ },
+ {
+ "epoch": 0.8552405364008627,
+ "grad_norm": 0.4560583233833313,
+ "learning_rate": 0.0001624542944437139,
+ "loss": 1.2658,
+ "step": 855
+ },
+ {
+ "epoch": 0.8562408177299866,
+ "grad_norm": 0.49611610174179077,
+ "learning_rate": 0.00016237239314819917,
+ "loss": 1.1017,
+ "step": 856
+ },
+ {
+ "epoch": 0.8572410990591104,
+ "grad_norm": 0.5600405931472778,
+ "learning_rate": 0.0001622904233167044,
+ "loss": 1.3274,
+ "step": 857
+ },
+ {
+ "epoch": 0.8582413803882342,
+ "grad_norm": 0.5849353075027466,
+ "learning_rate": 0.0001622083850392996,
+ "loss": 1.274,
+ "step": 858
+ },
+ {
+ "epoch": 0.859241661717358,
+ "grad_norm": 0.5781377553939819,
+ "learning_rate": 0.00016212627840613003,
+ "loss": 1.4157,
+ "step": 859
+ },
+ {
+ "epoch": 0.8602419430464818,
+ "grad_norm": 0.4908173680305481,
+ "learning_rate": 0.000162044103507416,
+ "loss": 1.3,
+ "step": 860
+ },
+ {
+ "epoch": 0.8612422243756056,
+ "grad_norm": 0.5844553112983704,
+ "learning_rate": 0.00016196186043345288,
+ "loss": 1.2325,
+ "step": 861
+ },
+ {
+ "epoch": 0.8622425057047295,
+ "grad_norm": 0.5381117463111877,
+ "learning_rate": 0.00016187954927461093,
+ "loss": 1.41,
+ "step": 862
+ },
+ {
+ "epoch": 0.8632427870338533,
+ "grad_norm": 0.5468165278434753,
+ "learning_rate": 0.00016179717012133521,
+ "loss": 1.4272,
+ "step": 863
+ },
+ {
+ "epoch": 0.864243068362977,
+ "grad_norm": 0.5702970027923584,
+ "learning_rate": 0.00016171472306414554,
+ "loss": 1.3624,
+ "step": 864
+ },
+ {
+ "epoch": 0.8652433496921009,
+ "grad_norm": 0.5430637001991272,
+ "learning_rate": 0.00016163220819363628,
+ "loss": 1.2555,
+ "step": 865
+ },
+ {
+ "epoch": 0.8662436310212247,
+ "grad_norm": 0.5266844034194946,
+ "learning_rate": 0.00016154962560047643,
+ "loss": 1.3743,
+ "step": 866
+ },
+ {
+ "epoch": 0.8672439123503486,
+ "grad_norm": 0.5201333165168762,
+ "learning_rate": 0.00016146697537540924,
+ "loss": 1.3959,
+ "step": 867
+ },
+ {
+ "epoch": 0.8682441936794724,
+ "grad_norm": 0.44362199306488037,
+ "learning_rate": 0.0001613842576092524,
+ "loss": 1.2661,
+ "step": 868
+ },
+ {
+ "epoch": 0.8692444750085961,
+ "grad_norm": 0.5465226769447327,
+ "learning_rate": 0.00016130147239289778,
+ "loss": 1.3688,
+ "step": 869
+ },
+ {
+ "epoch": 0.87024475633772,
+ "grad_norm": 0.5353460907936096,
+ "learning_rate": 0.00016121861981731135,
+ "loss": 1.2327,
+ "step": 870
+ },
+ {
+ "epoch": 0.8712450376668438,
+ "grad_norm": 0.5463739633560181,
+ "learning_rate": 0.00016113569997353312,
+ "loss": 1.2994,
+ "step": 871
+ },
+ {
+ "epoch": 0.8722453189959676,
+ "grad_norm": 0.5219647288322449,
+ "learning_rate": 0.000161052712952677,
+ "loss": 1.3916,
+ "step": 872
+ },
+ {
+ "epoch": 0.8732456003250915,
+ "grad_norm": 0.4675636887550354,
+ "learning_rate": 0.0001609696588459307,
+ "loss": 1.2786,
+ "step": 873
+ },
+ {
+ "epoch": 0.8742458816542152,
+ "grad_norm": 0.48863986134529114,
+ "learning_rate": 0.00016088653774455568,
+ "loss": 1.1762,
+ "step": 874
+ },
+ {
+ "epoch": 0.875246162983339,
+ "grad_norm": 0.48759785294532776,
+ "learning_rate": 0.00016080334973988695,
+ "loss": 1.2107,
+ "step": 875
+ },
+ {
+ "epoch": 0.8762464443124629,
+ "grad_norm": 0.7353807687759399,
+ "learning_rate": 0.00016072009492333318,
+ "loss": 1.4855,
+ "step": 876
+ },
+ {
+ "epoch": 0.8772467256415867,
+ "grad_norm": 0.4878953993320465,
+ "learning_rate": 0.0001606367733863763,
+ "loss": 1.2343,
+ "step": 877
+ },
+ {
+ "epoch": 0.8782470069707106,
+ "grad_norm": 0.4764840304851532,
+ "learning_rate": 0.00016055338522057158,
+ "loss": 1.3159,
+ "step": 878
+ },
+ {
+ "epoch": 0.8792472882998343,
+ "grad_norm": 0.5289160013198853,
+ "learning_rate": 0.00016046993051754756,
+ "loss": 1.3298,
+ "step": 879
+ },
+ {
+ "epoch": 0.8802475696289581,
+ "grad_norm": 0.5421459078788757,
+ "learning_rate": 0.00016038640936900586,
+ "loss": 1.4081,
+ "step": 880
+ },
+ {
+ "epoch": 0.881247850958082,
+ "grad_norm": 0.5096681118011475,
+ "learning_rate": 0.00016030282186672116,
+ "loss": 1.2406,
+ "step": 881
+ },
+ {
+ "epoch": 0.8822481322872058,
+ "grad_norm": 0.5783627033233643,
+ "learning_rate": 0.00016021916810254097,
+ "loss": 1.3505,
+ "step": 882
+ },
+ {
+ "epoch": 0.8832484136163296,
+ "grad_norm": 0.5718142986297607,
+ "learning_rate": 0.00016013544816838565,
+ "loss": 1.4106,
+ "step": 883
+ },
+ {
+ "epoch": 0.8842486949454534,
+ "grad_norm": 0.551607072353363,
+ "learning_rate": 0.00016005166215624827,
+ "loss": 1.3474,
+ "step": 884
+ },
+ {
+ "epoch": 0.8852489762745772,
+ "grad_norm": 0.5464247465133667,
+ "learning_rate": 0.0001599678101581945,
+ "loss": 1.4443,
+ "step": 885
+ },
+ {
+ "epoch": 0.886249257603701,
+ "grad_norm": 0.5075456500053406,
+ "learning_rate": 0.00015988389226636253,
+ "loss": 1.4919,
+ "step": 886
+ },
+ {
+ "epoch": 0.8872495389328249,
+ "grad_norm": 0.48557186126708984,
+ "learning_rate": 0.00015979990857296295,
+ "loss": 1.4225,
+ "step": 887
+ },
+ {
+ "epoch": 0.8882498202619487,
+ "grad_norm": 0.5385611653327942,
+ "learning_rate": 0.00015971585917027862,
+ "loss": 1.2937,
+ "step": 888
+ },
+ {
+ "epoch": 0.8892501015910725,
+ "grad_norm": 0.6477749943733215,
+ "learning_rate": 0.00015963174415066468,
+ "loss": 1.5628,
+ "step": 889
+ },
+ {
+ "epoch": 0.8902503829201963,
+ "grad_norm": 0.6205973029136658,
+ "learning_rate": 0.0001595475636065483,
+ "loss": 1.4902,
+ "step": 890
+ },
+ {
+ "epoch": 0.8912506642493201,
+ "grad_norm": 0.45717301964759827,
+ "learning_rate": 0.00015946331763042867,
+ "loss": 1.1998,
+ "step": 891
+ },
+ {
+ "epoch": 0.892250945578444,
+ "grad_norm": 0.5279855132102966,
+ "learning_rate": 0.00015937900631487686,
+ "loss": 1.0668,
+ "step": 892
+ },
+ {
+ "epoch": 0.8932512269075678,
+ "grad_norm": 0.5207269787788391,
+ "learning_rate": 0.00015929462975253585,
+ "loss": 1.2774,
+ "step": 893
+ },
+ {
+ "epoch": 0.8942515082366915,
+ "grad_norm": 0.5200834274291992,
+ "learning_rate": 0.00015921018803612014,
+ "loss": 1.4316,
+ "step": 894
+ },
+ {
+ "epoch": 0.8952517895658154,
+ "grad_norm": 0.48317649960517883,
+ "learning_rate": 0.0001591256812584159,
+ "loss": 1.4101,
+ "step": 895
+ },
+ {
+ "epoch": 0.8962520708949392,
+ "grad_norm": 0.475483775138855,
+ "learning_rate": 0.00015904110951228082,
+ "loss": 1.2011,
+ "step": 896
+ },
+ {
+ "epoch": 0.897252352224063,
+ "grad_norm": 0.6542660593986511,
+ "learning_rate": 0.00015895647289064396,
+ "loss": 1.56,
+ "step": 897
+ },
+ {
+ "epoch": 0.8982526335531869,
+ "grad_norm": 0.5154829621315002,
+ "learning_rate": 0.00015887177148650564,
+ "loss": 1.3748,
+ "step": 898
+ },
+ {
+ "epoch": 0.8992529148823106,
+ "grad_norm": 0.5744799375534058,
+ "learning_rate": 0.0001587870053929374,
+ "loss": 1.4072,
+ "step": 899
+ },
+ {
+ "epoch": 0.9002531962114345,
+ "grad_norm": 0.4835909307003021,
+ "learning_rate": 0.00015870217470308188,
+ "loss": 1.3037,
+ "step": 900
+ },
+ {
+ "epoch": 0.9012534775405583,
+ "grad_norm": 0.5292366743087769,
+ "learning_rate": 0.0001586172795101526,
+ "loss": 1.2395,
+ "step": 901
+ },
+ {
+ "epoch": 0.9022537588696821,
+ "grad_norm": 0.5905430912971497,
+ "learning_rate": 0.00015853231990743406,
+ "loss": 1.29,
+ "step": 902
+ },
+ {
+ "epoch": 0.903254040198806,
+ "grad_norm": 0.4918007254600525,
+ "learning_rate": 0.0001584472959882815,
+ "loss": 1.2593,
+ "step": 903
+ },
+ {
+ "epoch": 0.9042543215279297,
+ "grad_norm": 0.4735652208328247,
+ "learning_rate": 0.00015836220784612085,
+ "loss": 1.1669,
+ "step": 904
+ },
+ {
+ "epoch": 0.9052546028570535,
+ "grad_norm": 0.6272550821304321,
+ "learning_rate": 0.00015827705557444852,
+ "loss": 1.3692,
+ "step": 905
+ },
+ {
+ "epoch": 0.9062548841861774,
+ "grad_norm": 0.5333564877510071,
+ "learning_rate": 0.00015819183926683153,
+ "loss": 1.3672,
+ "step": 906
+ },
+ {
+ "epoch": 0.9072551655153012,
+ "grad_norm": 0.44029948115348816,
+ "learning_rate": 0.00015810655901690715,
+ "loss": 1.2124,
+ "step": 907
+ },
+ {
+ "epoch": 0.9082554468444249,
+ "grad_norm": 0.5636379718780518,
+ "learning_rate": 0.00015802121491838297,
+ "loss": 1.3507,
+ "step": 908
+ },
+ {
+ "epoch": 0.9092557281735488,
+ "grad_norm": 0.4394778907299042,
+ "learning_rate": 0.0001579358070650367,
+ "loss": 1.3159,
+ "step": 909
+ },
+ {
+ "epoch": 0.9102560095026726,
+ "grad_norm": 0.5382723212242126,
+ "learning_rate": 0.00015785033555071616,
+ "loss": 1.3733,
+ "step": 910
+ },
+ {
+ "epoch": 0.9112562908317965,
+ "grad_norm": 0.5251659750938416,
+ "learning_rate": 0.00015776480046933905,
+ "loss": 1.2253,
+ "step": 911
+ },
+ {
+ "epoch": 0.9122565721609203,
+ "grad_norm": 0.4791383743286133,
+ "learning_rate": 0.000157679201914893,
+ "loss": 1.2341,
+ "step": 912
+ },
+ {
+ "epoch": 0.913256853490044,
+ "grad_norm": 0.5058613419532776,
+ "learning_rate": 0.00015759353998143528,
+ "loss": 1.2717,
+ "step": 913
+ },
+ {
+ "epoch": 0.9142571348191679,
+ "grad_norm": 0.46837320923805237,
+ "learning_rate": 0.00015750781476309288,
+ "loss": 1.2484,
+ "step": 914
+ },
+ {
+ "epoch": 0.9152574161482917,
+ "grad_norm": 0.524444580078125,
+ "learning_rate": 0.00015742202635406235,
+ "loss": 1.5512,
+ "step": 915
+ },
+ {
+ "epoch": 0.9162576974774155,
+ "grad_norm": 0.6169744729995728,
+ "learning_rate": 0.00015733617484860963,
+ "loss": 1.271,
+ "step": 916
+ },
+ {
+ "epoch": 0.9172579788065394,
+ "grad_norm": 0.48883670568466187,
+ "learning_rate": 0.00015725026034106996,
+ "loss": 1.4779,
+ "step": 917
+ },
+ {
+ "epoch": 0.9182582601356631,
+ "grad_norm": 0.5408645272254944,
+ "learning_rate": 0.00015716428292584787,
+ "loss": 1.3574,
+ "step": 918
+ },
+ {
+ "epoch": 0.919258541464787,
+ "grad_norm": 0.5622221231460571,
+ "learning_rate": 0.00015707824269741702,
+ "loss": 1.2146,
+ "step": 919
+ },
+ {
+ "epoch": 0.9202588227939108,
+ "grad_norm": 0.477328896522522,
+ "learning_rate": 0.00015699213975031996,
+ "loss": 1.162,
+ "step": 920
+ },
+ {
+ "epoch": 0.9212591041230346,
+ "grad_norm": 0.503027081489563,
+ "learning_rate": 0.0001569059741791684,
+ "loss": 1.1674,
+ "step": 921
+ },
+ {
+ "epoch": 0.9222593854521585,
+ "grad_norm": 0.5951637625694275,
+ "learning_rate": 0.0001568197460786426,
+ "loss": 1.3737,
+ "step": 922
+ },
+ {
+ "epoch": 0.9232596667812822,
+ "grad_norm": 0.5276626348495483,
+ "learning_rate": 0.0001567334555434917,
+ "loss": 1.2494,
+ "step": 923
+ },
+ {
+ "epoch": 0.924259948110406,
+ "grad_norm": 0.6354761123657227,
+ "learning_rate": 0.0001566471026685334,
+ "loss": 1.2052,
+ "step": 924
+ },
+ {
+ "epoch": 0.9252602294395299,
+ "grad_norm": 0.4227287471294403,
+ "learning_rate": 0.00015656068754865387,
+ "loss": 1.1446,
+ "step": 925
+ },
+ {
+ "epoch": 0.9262605107686537,
+ "grad_norm": 0.5290839076042175,
+ "learning_rate": 0.00015647421027880772,
+ "loss": 1.2057,
+ "step": 926
+ },
+ {
+ "epoch": 0.9272607920977775,
+ "grad_norm": 0.4961225986480713,
+ "learning_rate": 0.0001563876709540178,
+ "loss": 1.2788,
+ "step": 927
+ },
+ {
+ "epoch": 0.9282610734269013,
+ "grad_norm": 0.5095213651657104,
+ "learning_rate": 0.0001563010696693752,
+ "loss": 1.2751,
+ "step": 928
+ },
+ {
+ "epoch": 0.9292613547560251,
+ "grad_norm": 0.5027223825454712,
+ "learning_rate": 0.00015621440652003907,
+ "loss": 1.3653,
+ "step": 929
+ },
+ {
+ "epoch": 0.930261636085149,
+ "grad_norm": 0.49251896142959595,
+ "learning_rate": 0.00015612768160123652,
+ "loss": 1.1556,
+ "step": 930
+ },
+ {
+ "epoch": 0.9312619174142728,
+ "grad_norm": 0.5187139511108398,
+ "learning_rate": 0.00015604089500826257,
+ "loss": 1.3623,
+ "step": 931
+ },
+ {
+ "epoch": 0.9322621987433966,
+ "grad_norm": 0.5004428029060364,
+ "learning_rate": 0.00015595404683648,
+ "loss": 1.185,
+ "step": 932
+ },
+ {
+ "epoch": 0.9332624800725204,
+ "grad_norm": 0.5750531554222107,
+ "learning_rate": 0.00015586713718131922,
+ "loss": 1.2999,
+ "step": 933
+ },
+ {
+ "epoch": 0.9342627614016442,
+ "grad_norm": 0.482732355594635,
+ "learning_rate": 0.0001557801661382782,
+ "loss": 1.2635,
+ "step": 934
+ },
+ {
+ "epoch": 0.935263042730768,
+ "grad_norm": 0.47854143381118774,
+ "learning_rate": 0.00015569313380292248,
+ "loss": 1.2833,
+ "step": 935
+ },
+ {
+ "epoch": 0.9362633240598919,
+ "grad_norm": 0.49382665753364563,
+ "learning_rate": 0.00015560604027088477,
+ "loss": 1.2327,
+ "step": 936
+ },
+ {
+ "epoch": 0.9372636053890157,
+ "grad_norm": 0.5009885430335999,
+ "learning_rate": 0.00015551888563786515,
+ "loss": 1.2967,
+ "step": 937
+ },
+ {
+ "epoch": 0.9382638867181394,
+ "grad_norm": 0.5012707114219666,
+ "learning_rate": 0.00015543166999963076,
+ "loss": 1.3231,
+ "step": 938
+ },
+ {
+ "epoch": 0.9392641680472633,
+ "grad_norm": 0.6908506751060486,
+ "learning_rate": 0.0001553443934520159,
+ "loss": 1.4055,
+ "step": 939
+ },
+ {
+ "epoch": 0.9402644493763871,
+ "grad_norm": 0.7104817032814026,
+ "learning_rate": 0.00015525705609092157,
+ "loss": 1.3435,
+ "step": 940
+ },
+ {
+ "epoch": 0.941264730705511,
+ "grad_norm": 0.49263522028923035,
+ "learning_rate": 0.00015516965801231586,
+ "loss": 1.2259,
+ "step": 941
+ },
+ {
+ "epoch": 0.9422650120346348,
+ "grad_norm": 0.5337693691253662,
+ "learning_rate": 0.0001550821993122334,
+ "loss": 1.2863,
+ "step": 942
+ },
+ {
+ "epoch": 0.9432652933637585,
+ "grad_norm": 0.5506749153137207,
+ "learning_rate": 0.0001549946800867755,
+ "loss": 1.4061,
+ "step": 943
+ },
+ {
+ "epoch": 0.9442655746928824,
+ "grad_norm": 0.5121364593505859,
+ "learning_rate": 0.00015490710043210997,
+ "loss": 1.3567,
+ "step": 944
+ },
+ {
+ "epoch": 0.9452658560220062,
+ "grad_norm": 0.5326678156852722,
+ "learning_rate": 0.00015481946044447099,
+ "loss": 1.2719,
+ "step": 945
+ },
+ {
+ "epoch": 0.94626613735113,
+ "grad_norm": 0.6023722290992737,
+ "learning_rate": 0.00015473176022015906,
+ "loss": 1.1512,
+ "step": 946
+ },
+ {
+ "epoch": 0.9472664186802539,
+ "grad_norm": 0.4953387975692749,
+ "learning_rate": 0.0001546439998555409,
+ "loss": 1.556,
+ "step": 947
+ },
+ {
+ "epoch": 0.9482667000093776,
+ "grad_norm": 0.5187799334526062,
+ "learning_rate": 0.0001545561794470492,
+ "loss": 1.279,
+ "step": 948
+ },
+ {
+ "epoch": 0.9492669813385014,
+ "grad_norm": 0.5788894295692444,
+ "learning_rate": 0.00015446829909118275,
+ "loss": 1.3246,
+ "step": 949
+ },
+ {
+ "epoch": 0.9502672626676253,
+ "grad_norm": 0.5551681518554688,
+ "learning_rate": 0.00015438035888450623,
+ "loss": 1.2231,
+ "step": 950
+ },
+ {
+ "epoch": 0.9512675439967491,
+ "grad_norm": 0.4898390471935272,
+ "learning_rate": 0.00015429235892364994,
+ "loss": 1.2036,
+ "step": 951
+ },
+ {
+ "epoch": 0.952267825325873,
+ "grad_norm": 0.5427507162094116,
+ "learning_rate": 0.00015420429930530996,
+ "loss": 1.3614,
+ "step": 952
+ },
+ {
+ "epoch": 0.9532681066549967,
+ "grad_norm": 0.557054340839386,
+ "learning_rate": 0.00015411618012624786,
+ "loss": 1.4249,
+ "step": 953
+ },
+ {
+ "epoch": 0.9542683879841205,
+ "grad_norm": 0.5793543457984924,
+ "learning_rate": 0.00015402800148329071,
+ "loss": 1.4341,
+ "step": 954
+ },
+ {
+ "epoch": 0.9552686693132444,
+ "grad_norm": 0.5993456244468689,
+ "learning_rate": 0.00015393976347333088,
+ "loss": 1.0259,
+ "step": 955
+ },
+ {
+ "epoch": 0.9562689506423682,
+ "grad_norm": 0.554904580116272,
+ "learning_rate": 0.00015385146619332596,
+ "loss": 1.3558,
+ "step": 956
+ },
+ {
+ "epoch": 0.9572692319714919,
+ "grad_norm": 0.5488478541374207,
+ "learning_rate": 0.00015376310974029873,
+ "loss": 1.358,
+ "step": 957
+ },
+ {
+ "epoch": 0.9582695133006158,
+ "grad_norm": 0.5108879208564758,
+ "learning_rate": 0.00015367469421133695,
+ "loss": 1.3865,
+ "step": 958
+ },
+ {
+ "epoch": 0.9592697946297396,
+ "grad_norm": 0.4606814682483673,
+ "learning_rate": 0.00015358621970359325,
+ "loss": 1.2055,
+ "step": 959
+ },
+ {
+ "epoch": 0.9602700759588634,
+ "grad_norm": 0.4974004328250885,
+ "learning_rate": 0.00015349768631428519,
+ "loss": 1.2541,
+ "step": 960
+ },
+ {
+ "epoch": 0.9612703572879873,
+ "grad_norm": 0.5107241272926331,
+ "learning_rate": 0.00015340909414069488,
+ "loss": 1.1624,
+ "step": 961
+ },
+ {
+ "epoch": 0.962270638617111,
+ "grad_norm": 0.5587212443351746,
+ "learning_rate": 0.00015332044328016914,
+ "loss": 1.349,
+ "step": 962
+ },
+ {
+ "epoch": 0.9632709199462349,
+ "grad_norm": 0.5209497809410095,
+ "learning_rate": 0.0001532317338301192,
+ "loss": 1.3695,
+ "step": 963
+ },
+ {
+ "epoch": 0.9642712012753587,
+ "grad_norm": 0.4985620677471161,
+ "learning_rate": 0.00015314296588802076,
+ "loss": 1.4597,
+ "step": 964
+ },
+ {
+ "epoch": 0.9652714826044825,
+ "grad_norm": 0.5065100789070129,
+ "learning_rate": 0.00015305413955141365,
+ "loss": 1.4225,
+ "step": 965
+ },
+ {
+ "epoch": 0.9662717639336064,
+ "grad_norm": 0.5079792737960815,
+ "learning_rate": 0.00015296525491790205,
+ "loss": 1.057,
+ "step": 966
+ },
+ {
+ "epoch": 0.9672720452627301,
+ "grad_norm": 0.4673600196838379,
+ "learning_rate": 0.00015287631208515406,
+ "loss": 1.2531,
+ "step": 967
+ },
+ {
+ "epoch": 0.9682723265918539,
+ "grad_norm": 0.5309945344924927,
+ "learning_rate": 0.00015278731115090171,
+ "loss": 1.374,
+ "step": 968
+ },
+ {
+ "epoch": 0.9692726079209778,
+ "grad_norm": 0.4792092442512512,
+ "learning_rate": 0.00015269825221294098,
+ "loss": 1.3018,
+ "step": 969
+ },
+ {
+ "epoch": 0.9702728892501016,
+ "grad_norm": 0.5222868323326111,
+ "learning_rate": 0.00015260913536913154,
+ "loss": 1.4063,
+ "step": 970
+ },
+ {
+ "epoch": 0.9712731705792254,
+ "grad_norm": 0.5373417139053345,
+ "learning_rate": 0.00015251996071739664,
+ "loss": 1.2183,
+ "step": 971
+ },
+ {
+ "epoch": 0.9722734519083492,
+ "grad_norm": 0.5624721050262451,
+ "learning_rate": 0.00015243072835572318,
+ "loss": 1.2696,
+ "step": 972
+ },
+ {
+ "epoch": 0.973273733237473,
+ "grad_norm": 0.46938082575798035,
+ "learning_rate": 0.0001523414383821613,
+ "loss": 1.3544,
+ "step": 973
+ },
+ {
+ "epoch": 0.9742740145665969,
+ "grad_norm": 0.45348694920539856,
+ "learning_rate": 0.00015225209089482462,
+ "loss": 1.2078,
+ "step": 974
+ },
+ {
+ "epoch": 0.9752742958957207,
+ "grad_norm": 0.48000606894493103,
+ "learning_rate": 0.0001521626859918898,
+ "loss": 1.1914,
+ "step": 975
+ },
+ {
+ "epoch": 0.9762745772248445,
+ "grad_norm": 0.5106796622276306,
+ "learning_rate": 0.00015207322377159668,
+ "loss": 1.3249,
+ "step": 976
+ },
+ {
+ "epoch": 0.9772748585539683,
+ "grad_norm": 0.49865373969078064,
+ "learning_rate": 0.00015198370433224805,
+ "loss": 1.2876,
+ "step": 977
+ },
+ {
+ "epoch": 0.9782751398830921,
+ "grad_norm": 0.5271755456924438,
+ "learning_rate": 0.00015189412777220958,
+ "loss": 1.3049,
+ "step": 978
+ },
+ {
+ "epoch": 0.9792754212122159,
+ "grad_norm": 0.49824708700180054,
+ "learning_rate": 0.00015180449418990976,
+ "loss": 1.1614,
+ "step": 979
+ },
+ {
+ "epoch": 0.9802757025413398,
+ "grad_norm": 0.7327549457550049,
+ "learning_rate": 0.00015171480368383964,
+ "loss": 1.2923,
+ "step": 980
+ },
+ {
+ "epoch": 0.9812759838704636,
+ "grad_norm": 0.5170425176620483,
+ "learning_rate": 0.00015162505635255287,
+ "loss": 1.3097,
+ "step": 981
+ },
+ {
+ "epoch": 0.9822762651995874,
+ "grad_norm": 0.47041526436805725,
+ "learning_rate": 0.00015153525229466555,
+ "loss": 1.3508,
+ "step": 982
+ },
+ {
+ "epoch": 0.9832765465287112,
+ "grad_norm": 0.4670693278312683,
+ "learning_rate": 0.00015144539160885613,
+ "loss": 1.3974,
+ "step": 983
+ },
+ {
+ "epoch": 0.984276827857835,
+ "grad_norm": 0.5745754837989807,
+ "learning_rate": 0.00015135547439386516,
+ "loss": 1.2977,
+ "step": 984
+ },
+ {
+ "epoch": 0.9852771091869589,
+ "grad_norm": 0.5845474004745483,
+ "learning_rate": 0.0001512655007484955,
+ "loss": 1.3384,
+ "step": 985
+ },
+ {
+ "epoch": 0.9862773905160827,
+ "grad_norm": 0.5627439618110657,
+ "learning_rate": 0.00015117547077161185,
+ "loss": 1.1756,
+ "step": 986
+ },
+ {
+ "epoch": 0.9872776718452064,
+ "grad_norm": 0.6411226987838745,
+ "learning_rate": 0.0001510853845621409,
+ "loss": 1.3441,
+ "step": 987
+ },
+ {
+ "epoch": 0.9882779531743303,
+ "grad_norm": 0.545659601688385,
+ "learning_rate": 0.00015099524221907107,
+ "loss": 1.3766,
+ "step": 988
+ },
+ {
+ "epoch": 0.9892782345034541,
+ "grad_norm": 0.5058498382568359,
+ "learning_rate": 0.0001509050438414525,
+ "loss": 1.3171,
+ "step": 989
+ },
+ {
+ "epoch": 0.9902785158325779,
+ "grad_norm": 0.6247567534446716,
+ "learning_rate": 0.00015081478952839693,
+ "loss": 1.2141,
+ "step": 990
+ },
+ {
+ "epoch": 0.9912787971617018,
+ "grad_norm": 0.5492308139801025,
+ "learning_rate": 0.00015072447937907753,
+ "loss": 1.1626,
+ "step": 991
+ },
+ {
+ "epoch": 0.9922790784908255,
+ "grad_norm": 0.4795534908771515,
+ "learning_rate": 0.00015063411349272877,
+ "loss": 1.218,
+ "step": 992
+ },
+ {
+ "epoch": 0.9932793598199494,
+ "grad_norm": 0.5527793169021606,
+ "learning_rate": 0.00015054369196864644,
+ "loss": 1.3816,
+ "step": 993
+ },
+ {
+ "epoch": 0.9942796411490732,
+ "grad_norm": 0.5297475457191467,
+ "learning_rate": 0.00015045321490618748,
+ "loss": 1.2515,
+ "step": 994
+ },
+ {
+ "epoch": 0.995279922478197,
+ "grad_norm": 0.518803596496582,
+ "learning_rate": 0.00015036268240476978,
+ "loss": 1.3631,
+ "step": 995
+ },
+ {
+ "epoch": 0.9962802038073209,
+ "grad_norm": 0.47196391224861145,
+ "learning_rate": 0.00015027209456387218,
+ "loss": 1.0932,
+ "step": 996
+ },
+ {
+ "epoch": 0.9972804851364446,
+ "grad_norm": 0.5369086861610413,
+ "learning_rate": 0.00015018145148303438,
+ "loss": 1.1181,
+ "step": 997
+ },
+ {
+ "epoch": 0.9982807664655684,
+ "grad_norm": 0.5940788388252258,
+ "learning_rate": 0.00015009075326185667,
+ "loss": 1.561,
+ "step": 998
+ },
+ {
+ "epoch": 0.9992810477946923,
+ "grad_norm": 0.5340734124183655,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 1.2909,
+ "step": 999
+ },
+ {
+ "epoch": 1.0002813291238162,
+ "grad_norm": 0.5133704543113708,
+ "learning_rate": 0.00014990919179718584,
+ "loss": 1.0441,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0012816104529398,
+ "grad_norm": 0.3812060058116913,
+ "learning_rate": 0.00014981832875319597,
+ "loss": 0.8215,
+ "step": 1001
+ },
+ {
+ "epoch": 1.0022818917820637,
+ "grad_norm": 0.40786364674568176,
+ "learning_rate": 0.00014972741096787242,
+ "loss": 0.8215,
+ "step": 1002
+ },
+ {
+ "epoch": 1.0032821731111876,
+ "grad_norm": 0.4328629672527313,
+ "learning_rate": 0.0001496364385411174,
+ "loss": 0.9506,
+ "step": 1003
+ },
+ {
+ "epoch": 1.0042824544403113,
+ "grad_norm": 0.4680945873260498,
+ "learning_rate": 0.0001495454115728932,
+ "loss": 0.8443,
+ "step": 1004
+ },
+ {
+ "epoch": 1.0052827357694352,
+ "grad_norm": 0.48512670397758484,
+ "learning_rate": 0.0001494543301632219,
+ "loss": 1.1143,
+ "step": 1005
+ },
+ {
+ "epoch": 1.006283017098559,
+ "grad_norm": 0.43949049711227417,
+ "learning_rate": 0.00014936319441218555,
+ "loss": 1.0257,
+ "step": 1006
+ },
+ {
+ "epoch": 1.0072832984276827,
+ "grad_norm": 0.5564325451850891,
+ "learning_rate": 0.0001492720044199259,
+ "loss": 0.967,
+ "step": 1007
+ },
+ {
+ "epoch": 1.0082835797568066,
+ "grad_norm": 0.47199952602386475,
+ "learning_rate": 0.0001491807602866442,
+ "loss": 1.0317,
+ "step": 1008
+ },
+ {
+ "epoch": 1.0092838610859305,
+ "grad_norm": 0.4625256657600403,
+ "learning_rate": 0.00014908946211260123,
+ "loss": 0.894,
+ "step": 1009
+ },
+ {
+ "epoch": 1.0102841424150542,
+ "grad_norm": 0.5081682801246643,
+ "learning_rate": 0.00014899810999811726,
+ "loss": 0.9647,
+ "step": 1010
+ },
+ {
+ "epoch": 1.011284423744178,
+ "grad_norm": 0.5240431427955627,
+ "learning_rate": 0.0001489067040435717,
+ "loss": 1.1076,
+ "step": 1011
+ },
+ {
+ "epoch": 1.012284705073302,
+ "grad_norm": 0.5996805429458618,
+ "learning_rate": 0.00014881524434940313,
+ "loss": 0.9063,
+ "step": 1012
+ },
+ {
+ "epoch": 1.0132849864024256,
+ "grad_norm": 0.4602286219596863,
+ "learning_rate": 0.0001487237310161093,
+ "loss": 0.8003,
+ "step": 1013
+ },
+ {
+ "epoch": 1.0142852677315495,
+ "grad_norm": 0.5298121571540833,
+ "learning_rate": 0.0001486321641442467,
+ "loss": 0.9616,
+ "step": 1014
+ },
+ {
+ "epoch": 1.0152855490606734,
+ "grad_norm": 0.47525477409362793,
+ "learning_rate": 0.00014854054383443081,
+ "loss": 1.0457,
+ "step": 1015
+ },
+ {
+ "epoch": 1.016285830389797,
+ "grad_norm": 0.5577285885810852,
+ "learning_rate": 0.00014844887018733582,
+ "loss": 0.8973,
+ "step": 1016
+ },
+ {
+ "epoch": 1.017286111718921,
+ "grad_norm": 0.5028079748153687,
+ "learning_rate": 0.00014835714330369446,
+ "loss": 1.0721,
+ "step": 1017
+ },
+ {
+ "epoch": 1.0182863930480448,
+ "grad_norm": 0.5401796102523804,
+ "learning_rate": 0.00014826536328429795,
+ "loss": 0.9595,
+ "step": 1018
+ },
+ {
+ "epoch": 1.0192866743771685,
+ "grad_norm": 0.4957962930202484,
+ "learning_rate": 0.000148173530229996,
+ "loss": 0.9871,
+ "step": 1019
+ },
+ {
+ "epoch": 1.0202869557062924,
+ "grad_norm": 0.4891825020313263,
+ "learning_rate": 0.00014808164424169647,
+ "loss": 0.9546,
+ "step": 1020
+ },
+ {
+ "epoch": 1.0212872370354162,
+ "grad_norm": 0.48703211545944214,
+ "learning_rate": 0.0001479897054203655,
+ "loss": 0.8863,
+ "step": 1021
+ },
+ {
+ "epoch": 1.0222875183645401,
+ "grad_norm": 0.5614656805992126,
+ "learning_rate": 0.00014789771386702717,
+ "loss": 0.9857,
+ "step": 1022
+ },
+ {
+ "epoch": 1.0232877996936638,
+ "grad_norm": 0.5903550982475281,
+ "learning_rate": 0.0001478056696827636,
+ "loss": 0.8347,
+ "step": 1023
+ },
+ {
+ "epoch": 1.0242880810227877,
+ "grad_norm": 0.47974926233291626,
+ "learning_rate": 0.0001477135729687147,
+ "loss": 1.0035,
+ "step": 1024
+ },
+ {
+ "epoch": 1.0252883623519116,
+ "grad_norm": 0.5049344897270203,
+ "learning_rate": 0.0001476214238260781,
+ "loss": 0.953,
+ "step": 1025
+ },
+ {
+ "epoch": 1.0262886436810352,
+ "grad_norm": 0.3981640636920929,
+ "learning_rate": 0.000147529222356109,
+ "loss": 0.7118,
+ "step": 1026
+ },
+ {
+ "epoch": 1.0272889250101591,
+ "grad_norm": 0.598785400390625,
+ "learning_rate": 0.0001474369686601202,
+ "loss": 0.9002,
+ "step": 1027
+ },
+ {
+ "epoch": 1.028289206339283,
+ "grad_norm": 0.5422918200492859,
+ "learning_rate": 0.0001473446628394818,
+ "loss": 1.192,
+ "step": 1028
+ },
+ {
+ "epoch": 1.0292894876684067,
+ "grad_norm": 0.592509925365448,
+ "learning_rate": 0.00014725230499562119,
+ "loss": 1.0989,
+ "step": 1029
+ },
+ {
+ "epoch": 1.0302897689975306,
+ "grad_norm": 0.5232793688774109,
+ "learning_rate": 0.00014715989523002296,
+ "loss": 1.0667,
+ "step": 1030
+ },
+ {
+ "epoch": 1.0312900503266544,
+ "grad_norm": 0.5362406373023987,
+ "learning_rate": 0.00014706743364422878,
+ "loss": 0.8933,
+ "step": 1031
+ },
+ {
+ "epoch": 1.032290331655778,
+ "grad_norm": 0.43486225605010986,
+ "learning_rate": 0.00014697492033983707,
+ "loss": 0.8525,
+ "step": 1032
+ },
+ {
+ "epoch": 1.033290612984902,
+ "grad_norm": 0.5187330842018127,
+ "learning_rate": 0.00014688235541850337,
+ "loss": 1.017,
+ "step": 1033
+ },
+ {
+ "epoch": 1.0342908943140259,
+ "grad_norm": 0.5081651210784912,
+ "learning_rate": 0.0001467897389819397,
+ "loss": 1.0135,
+ "step": 1034
+ },
+ {
+ "epoch": 1.0352911756431495,
+ "grad_norm": 0.49661391973495483,
+ "learning_rate": 0.00014669707113191483,
+ "loss": 0.8711,
+ "step": 1035
+ },
+ {
+ "epoch": 1.0362914569722734,
+ "grad_norm": 0.4899054169654846,
+ "learning_rate": 0.0001466043519702539,
+ "loss": 0.9924,
+ "step": 1036
+ },
+ {
+ "epoch": 1.0372917383013973,
+ "grad_norm": 0.47787439823150635,
+ "learning_rate": 0.00014651158159883855,
+ "loss": 0.9238,
+ "step": 1037
+ },
+ {
+ "epoch": 1.038292019630521,
+ "grad_norm": 0.509600818157196,
+ "learning_rate": 0.0001464187601196066,
+ "loss": 0.8854,
+ "step": 1038
+ },
+ {
+ "epoch": 1.0392923009596449,
+ "grad_norm": 0.3907245397567749,
+ "learning_rate": 0.00014632588763455212,
+ "loss": 0.8911,
+ "step": 1039
+ },
+ {
+ "epoch": 1.0402925822887688,
+ "grad_norm": 0.4939952492713928,
+ "learning_rate": 0.00014623296424572517,
+ "loss": 0.9069,
+ "step": 1040
+ },
+ {
+ "epoch": 1.0412928636178926,
+ "grad_norm": 0.4680919945240021,
+ "learning_rate": 0.00014613999005523174,
+ "loss": 0.9361,
+ "step": 1041
+ },
+ {
+ "epoch": 1.0422931449470163,
+ "grad_norm": 0.4871543347835541,
+ "learning_rate": 0.00014604696516523361,
+ "loss": 0.9268,
+ "step": 1042
+ },
+ {
+ "epoch": 1.0432934262761402,
+ "grad_norm": 0.5115481615066528,
+ "learning_rate": 0.00014595388967794835,
+ "loss": 0.9555,
+ "step": 1043
+ },
+ {
+ "epoch": 1.044293707605264,
+ "grad_norm": 0.5923699140548706,
+ "learning_rate": 0.00014586076369564908,
+ "loss": 1.0122,
+ "step": 1044
+ },
+ {
+ "epoch": 1.0452939889343877,
+ "grad_norm": 0.491161048412323,
+ "learning_rate": 0.00014576758732066442,
+ "loss": 0.9805,
+ "step": 1045
+ },
+ {
+ "epoch": 1.0462942702635116,
+ "grad_norm": 0.462168425321579,
+ "learning_rate": 0.00014567436065537835,
+ "loss": 0.9213,
+ "step": 1046
+ },
+ {
+ "epoch": 1.0472945515926355,
+ "grad_norm": 0.5082408785820007,
+ "learning_rate": 0.00014558108380223012,
+ "loss": 0.9073,
+ "step": 1047
+ },
+ {
+ "epoch": 1.0482948329217592,
+ "grad_norm": 0.6131752133369446,
+ "learning_rate": 0.00014548775686371412,
+ "loss": 0.9156,
+ "step": 1048
+ },
+ {
+ "epoch": 1.049295114250883,
+ "grad_norm": 0.6133660674095154,
+ "learning_rate": 0.00014539437994237977,
+ "loss": 1.2011,
+ "step": 1049
+ },
+ {
+ "epoch": 1.050295395580007,
+ "grad_norm": 0.542412519454956,
+ "learning_rate": 0.00014530095314083143,
+ "loss": 1.1075,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0512956769091306,
+ "grad_norm": 0.5367622971534729,
+ "learning_rate": 0.00014520747656172824,
+ "loss": 1.0783,
+ "step": 1051
+ },
+ {
+ "epoch": 1.0522959582382545,
+ "grad_norm": 0.5243119597434998,
+ "learning_rate": 0.00014511395030778406,
+ "loss": 1.0865,
+ "step": 1052
+ },
+ {
+ "epoch": 1.0532962395673784,
+ "grad_norm": 0.5611020922660828,
+ "learning_rate": 0.00014502037448176734,
+ "loss": 0.9613,
+ "step": 1053
+ },
+ {
+ "epoch": 1.054296520896502,
+ "grad_norm": 0.506432294845581,
+ "learning_rate": 0.000144926749186501,
+ "loss": 1.1364,
+ "step": 1054
+ },
+ {
+ "epoch": 1.055296802225626,
+ "grad_norm": 0.5270103812217712,
+ "learning_rate": 0.00014483307452486227,
+ "loss": 1.042,
+ "step": 1055
+ },
+ {
+ "epoch": 1.0562970835547498,
+ "grad_norm": 0.5376967191696167,
+ "learning_rate": 0.0001447393505997827,
+ "loss": 0.9563,
+ "step": 1056
+ },
+ {
+ "epoch": 1.0572973648838735,
+ "grad_norm": 0.4821127653121948,
+ "learning_rate": 0.00014464557751424793,
+ "loss": 0.9241,
+ "step": 1057
+ },
+ {
+ "epoch": 1.0582976462129974,
+ "grad_norm": 0.6197866201400757,
+ "learning_rate": 0.00014455175537129758,
+ "loss": 1.0489,
+ "step": 1058
+ },
+ {
+ "epoch": 1.0592979275421213,
+ "grad_norm": 0.42820343375205994,
+ "learning_rate": 0.00014445788427402528,
+ "loss": 0.7755,
+ "step": 1059
+ },
+ {
+ "epoch": 1.0602982088712452,
+ "grad_norm": 0.49635690450668335,
+ "learning_rate": 0.00014436396432557835,
+ "loss": 0.8485,
+ "step": 1060
+ },
+ {
+ "epoch": 1.0612984902003688,
+ "grad_norm": 0.5529823899269104,
+ "learning_rate": 0.00014426999562915782,
+ "loss": 0.9589,
+ "step": 1061
+ },
+ {
+ "epoch": 1.0622987715294927,
+ "grad_norm": 0.5504932403564453,
+ "learning_rate": 0.00014417597828801832,
+ "loss": 0.9048,
+ "step": 1062
+ },
+ {
+ "epoch": 1.0632990528586166,
+ "grad_norm": 0.5755835175514221,
+ "learning_rate": 0.0001440819124054679,
+ "loss": 0.9542,
+ "step": 1063
+ },
+ {
+ "epoch": 1.0642993341877403,
+ "grad_norm": 0.4767759144306183,
+ "learning_rate": 0.00014398779808486793,
+ "loss": 0.9174,
+ "step": 1064
+ },
+ {
+ "epoch": 1.0652996155168641,
+ "grad_norm": 0.5343469381332397,
+ "learning_rate": 0.00014389363542963306,
+ "loss": 0.8493,
+ "step": 1065
+ },
+ {
+ "epoch": 1.066299896845988,
+ "grad_norm": 0.48161643743515015,
+ "learning_rate": 0.000143799424543231,
+ "loss": 0.8218,
+ "step": 1066
+ },
+ {
+ "epoch": 1.0673001781751117,
+ "grad_norm": 0.4958563446998596,
+ "learning_rate": 0.0001437051655291825,
+ "loss": 0.9849,
+ "step": 1067
+ },
+ {
+ "epoch": 1.0683004595042356,
+ "grad_norm": 0.5286628007888794,
+ "learning_rate": 0.0001436108584910611,
+ "loss": 0.8935,
+ "step": 1068
+ },
+ {
+ "epoch": 1.0693007408333595,
+ "grad_norm": 0.6096596121788025,
+ "learning_rate": 0.0001435165035324933,
+ "loss": 1.0577,
+ "step": 1069
+ },
+ {
+ "epoch": 1.0703010221624831,
+ "grad_norm": 0.4895448088645935,
+ "learning_rate": 0.000143422100757158,
+ "loss": 0.865,
+ "step": 1070
+ },
+ {
+ "epoch": 1.071301303491607,
+ "grad_norm": 0.5186201930046082,
+ "learning_rate": 0.00014332765026878687,
+ "loss": 0.8414,
+ "step": 1071
+ },
+ {
+ "epoch": 1.072301584820731,
+ "grad_norm": 0.5639254450798035,
+ "learning_rate": 0.0001432331521711639,
+ "loss": 0.9401,
+ "step": 1072
+ },
+ {
+ "epoch": 1.0733018661498546,
+ "grad_norm": 0.48865774273872375,
+ "learning_rate": 0.00014313860656812536,
+ "loss": 0.7894,
+ "step": 1073
+ },
+ {
+ "epoch": 1.0743021474789785,
+ "grad_norm": 0.4796544313430786,
+ "learning_rate": 0.00014304401356355983,
+ "loss": 0.8153,
+ "step": 1074
+ },
+ {
+ "epoch": 1.0753024288081023,
+ "grad_norm": 0.5578910708427429,
+ "learning_rate": 0.00014294937326140788,
+ "loss": 1.1675,
+ "step": 1075
+ },
+ {
+ "epoch": 1.076302710137226,
+ "grad_norm": 0.5607575178146362,
+ "learning_rate": 0.00014285468576566207,
+ "loss": 0.9133,
+ "step": 1076
+ },
+ {
+ "epoch": 1.07730299146635,
+ "grad_norm": 0.48808708786964417,
+ "learning_rate": 0.00014275995118036693,
+ "loss": 0.8884,
+ "step": 1077
+ },
+ {
+ "epoch": 1.0783032727954738,
+ "grad_norm": 0.4981604814529419,
+ "learning_rate": 0.00014266516960961852,
+ "loss": 0.9235,
+ "step": 1078
+ },
+ {
+ "epoch": 1.0793035541245974,
+ "grad_norm": 0.6323955655097961,
+ "learning_rate": 0.00014257034115756472,
+ "loss": 1.1617,
+ "step": 1079
+ },
+ {
+ "epoch": 1.0803038354537213,
+ "grad_norm": 0.5465244650840759,
+ "learning_rate": 0.0001424754659284048,
+ "loss": 1.0126,
+ "step": 1080
+ },
+ {
+ "epoch": 1.0813041167828452,
+ "grad_norm": 0.504200279712677,
+ "learning_rate": 0.0001423805440263895,
+ "loss": 1.0069,
+ "step": 1081
+ },
+ {
+ "epoch": 1.0823043981119689,
+ "grad_norm": 0.8698700070381165,
+ "learning_rate": 0.0001422855755558208,
+ "loss": 0.9653,
+ "step": 1082
+ },
+ {
+ "epoch": 1.0833046794410928,
+ "grad_norm": 0.41991496086120605,
+ "learning_rate": 0.00014219056062105193,
+ "loss": 1.089,
+ "step": 1083
+ },
+ {
+ "epoch": 1.0843049607702167,
+ "grad_norm": 0.5334717035293579,
+ "learning_rate": 0.0001420954993264871,
+ "loss": 1.0137,
+ "step": 1084
+ },
+ {
+ "epoch": 1.0853052420993405,
+ "grad_norm": 0.5418859124183655,
+ "learning_rate": 0.00014200039177658145,
+ "loss": 0.9302,
+ "step": 1085
+ },
+ {
+ "epoch": 1.0863055234284642,
+ "grad_norm": 0.515819251537323,
+ "learning_rate": 0.000141905238075841,
+ "loss": 1.0703,
+ "step": 1086
+ },
+ {
+ "epoch": 1.087305804757588,
+ "grad_norm": 0.43046239018440247,
+ "learning_rate": 0.00014181003832882248,
+ "loss": 1.0722,
+ "step": 1087
+ },
+ {
+ "epoch": 1.088306086086712,
+ "grad_norm": 0.6555958390235901,
+ "learning_rate": 0.00014171479264013311,
+ "loss": 0.806,
+ "step": 1088
+ },
+ {
+ "epoch": 1.0893063674158356,
+ "grad_norm": 0.5608332753181458,
+ "learning_rate": 0.00014161950111443077,
+ "loss": 0.9925,
+ "step": 1089
+ },
+ {
+ "epoch": 1.0903066487449595,
+ "grad_norm": 0.5866970419883728,
+ "learning_rate": 0.00014152416385642357,
+ "loss": 0.9278,
+ "step": 1090
+ },
+ {
+ "epoch": 1.0913069300740834,
+ "grad_norm": 0.4913788437843323,
+ "learning_rate": 0.00014142878097086995,
+ "loss": 0.7394,
+ "step": 1091
+ },
+ {
+ "epoch": 1.092307211403207,
+ "grad_norm": 0.4942512512207031,
+ "learning_rate": 0.0001413333525625784,
+ "loss": 0.8891,
+ "step": 1092
+ },
+ {
+ "epoch": 1.093307492732331,
+ "grad_norm": 0.5537131428718567,
+ "learning_rate": 0.00014123787873640754,
+ "loss": 0.9632,
+ "step": 1093
+ },
+ {
+ "epoch": 1.0943077740614549,
+ "grad_norm": 0.49271076917648315,
+ "learning_rate": 0.00014114235959726575,
+ "loss": 0.8708,
+ "step": 1094
+ },
+ {
+ "epoch": 1.0953080553905785,
+ "grad_norm": 0.448188841342926,
+ "learning_rate": 0.0001410467952501114,
+ "loss": 0.9727,
+ "step": 1095
+ },
+ {
+ "epoch": 1.0963083367197024,
+ "grad_norm": 0.4975283741950989,
+ "learning_rate": 0.00014095118579995235,
+ "loss": 0.9971,
+ "step": 1096
+ },
+ {
+ "epoch": 1.0973086180488263,
+ "grad_norm": 0.46382221579551697,
+ "learning_rate": 0.0001408555313518461,
+ "loss": 0.8853,
+ "step": 1097
+ },
+ {
+ "epoch": 1.09830889937795,
+ "grad_norm": 0.5071414113044739,
+ "learning_rate": 0.00014075983201089964,
+ "loss": 0.7723,
+ "step": 1098
+ },
+ {
+ "epoch": 1.0993091807070738,
+ "grad_norm": 0.41700050234794617,
+ "learning_rate": 0.0001406640878822692,
+ "loss": 0.7892,
+ "step": 1099
+ },
+ {
+ "epoch": 1.1003094620361977,
+ "grad_norm": 0.497175395488739,
+ "learning_rate": 0.00014056829907116024,
+ "loss": 0.9791,
+ "step": 1100
+ },
+ {
+ "epoch": 1.1013097433653214,
+ "grad_norm": 0.4512806236743927,
+ "learning_rate": 0.00014047246568282736,
+ "loss": 0.9878,
+ "step": 1101
+ },
+ {
+ "epoch": 1.1023100246944453,
+ "grad_norm": 0.5804361701011658,
+ "learning_rate": 0.00014037658782257414,
+ "loss": 1.1583,
+ "step": 1102
+ },
+ {
+ "epoch": 1.1033103060235692,
+ "grad_norm": 0.5334234237670898,
+ "learning_rate": 0.00014028066559575302,
+ "loss": 1.0705,
+ "step": 1103
+ },
+ {
+ "epoch": 1.104310587352693,
+ "grad_norm": 0.4683452844619751,
+ "learning_rate": 0.00014018469910776513,
+ "loss": 0.8608,
+ "step": 1104
+ },
+ {
+ "epoch": 1.1053108686818167,
+ "grad_norm": 0.5595771074295044,
+ "learning_rate": 0.0001400886884640603,
+ "loss": 1.0804,
+ "step": 1105
+ },
+ {
+ "epoch": 1.1063111500109406,
+ "grad_norm": 0.45048126578330994,
+ "learning_rate": 0.00013999263377013693,
+ "loss": 0.7782,
+ "step": 1106
+ },
+ {
+ "epoch": 1.1073114313400645,
+ "grad_norm": 0.4472745954990387,
+ "learning_rate": 0.00013989653513154165,
+ "loss": 0.8599,
+ "step": 1107
+ },
+ {
+ "epoch": 1.1083117126691882,
+ "grad_norm": 0.5168829560279846,
+ "learning_rate": 0.00013980039265386955,
+ "loss": 0.9984,
+ "step": 1108
+ },
+ {
+ "epoch": 1.109311993998312,
+ "grad_norm": 0.5712297558784485,
+ "learning_rate": 0.00013970420644276383,
+ "loss": 0.957,
+ "step": 1109
+ },
+ {
+ "epoch": 1.110312275327436,
+ "grad_norm": 0.5360589027404785,
+ "learning_rate": 0.0001396079766039157,
+ "loss": 1.0957,
+ "step": 1110
+ },
+ {
+ "epoch": 1.1113125566565596,
+ "grad_norm": 0.49815621972084045,
+ "learning_rate": 0.00013951170324306435,
+ "loss": 1.1143,
+ "step": 1111
+ },
+ {
+ "epoch": 1.1123128379856835,
+ "grad_norm": 0.45044735074043274,
+ "learning_rate": 0.00013941538646599687,
+ "loss": 0.8463,
+ "step": 1112
+ },
+ {
+ "epoch": 1.1133131193148074,
+ "grad_norm": 0.5086628198623657,
+ "learning_rate": 0.0001393190263785479,
+ "loss": 0.9061,
+ "step": 1113
+ },
+ {
+ "epoch": 1.114313400643931,
+ "grad_norm": 0.4669632315635681,
+ "learning_rate": 0.0001392226230865998,
+ "loss": 0.7891,
+ "step": 1114
+ },
+ {
+ "epoch": 1.115313681973055,
+ "grad_norm": 0.43681180477142334,
+ "learning_rate": 0.0001391261766960823,
+ "loss": 0.7687,
+ "step": 1115
+ },
+ {
+ "epoch": 1.1163139633021788,
+ "grad_norm": 0.47354501485824585,
+ "learning_rate": 0.00013902968731297255,
+ "loss": 1.0181,
+ "step": 1116
+ },
+ {
+ "epoch": 1.1173142446313025,
+ "grad_norm": 0.5224591493606567,
+ "learning_rate": 0.00013893315504329498,
+ "loss": 0.9072,
+ "step": 1117
+ },
+ {
+ "epoch": 1.1183145259604264,
+ "grad_norm": 0.5648715496063232,
+ "learning_rate": 0.00013883657999312109,
+ "loss": 1.0256,
+ "step": 1118
+ },
+ {
+ "epoch": 1.1193148072895502,
+ "grad_norm": 0.4603082239627838,
+ "learning_rate": 0.00013873996226856933,
+ "loss": 0.9129,
+ "step": 1119
+ },
+ {
+ "epoch": 1.120315088618674,
+ "grad_norm": 0.48259446024894714,
+ "learning_rate": 0.00013864330197580513,
+ "loss": 0.8335,
+ "step": 1120
+ },
+ {
+ "epoch": 1.1213153699477978,
+ "grad_norm": 0.5239295363426208,
+ "learning_rate": 0.0001385465992210407,
+ "loss": 1.1409,
+ "step": 1121
+ },
+ {
+ "epoch": 1.1223156512769217,
+ "grad_norm": 0.5242553949356079,
+ "learning_rate": 0.00013844985411053492,
+ "loss": 0.9542,
+ "step": 1122
+ },
+ {
+ "epoch": 1.1233159326060456,
+ "grad_norm": 0.5396201014518738,
+ "learning_rate": 0.00013835306675059308,
+ "loss": 1.0786,
+ "step": 1123
+ },
+ {
+ "epoch": 1.1243162139351692,
+ "grad_norm": 1.818426251411438,
+ "learning_rate": 0.00013825623724756704,
+ "loss": 0.9336,
+ "step": 1124
+ },
+ {
+ "epoch": 1.1253164952642931,
+ "grad_norm": 0.5364382863044739,
+ "learning_rate": 0.00013815936570785487,
+ "loss": 0.8096,
+ "step": 1125
+ },
+ {
+ "epoch": 1.1263167765934168,
+ "grad_norm": 0.47344619035720825,
+ "learning_rate": 0.00013806245223790088,
+ "loss": 0.8777,
+ "step": 1126
+ },
+ {
+ "epoch": 1.1273170579225407,
+ "grad_norm": 0.48119789361953735,
+ "learning_rate": 0.0001379654969441955,
+ "loss": 0.9965,
+ "step": 1127
+ },
+ {
+ "epoch": 1.1283173392516646,
+ "grad_norm": 0.5970126390457153,
+ "learning_rate": 0.000137868499933275,
+ "loss": 1.1389,
+ "step": 1128
+ },
+ {
+ "epoch": 1.1293176205807884,
+ "grad_norm": 0.5217893719673157,
+ "learning_rate": 0.00013777146131172162,
+ "loss": 1.1361,
+ "step": 1129
+ },
+ {
+ "epoch": 1.130317901909912,
+ "grad_norm": 0.4322263300418854,
+ "learning_rate": 0.00013767438118616318,
+ "loss": 0.8632,
+ "step": 1130
+ },
+ {
+ "epoch": 1.131318183239036,
+ "grad_norm": 0.49836596846580505,
+ "learning_rate": 0.00013757725966327322,
+ "loss": 0.9594,
+ "step": 1131
+ },
+ {
+ "epoch": 1.1323184645681599,
+ "grad_norm": 0.5220472812652588,
+ "learning_rate": 0.00013748009684977073,
+ "loss": 1.0783,
+ "step": 1132
+ },
+ {
+ "epoch": 1.1333187458972835,
+ "grad_norm": 0.5030301809310913,
+ "learning_rate": 0.0001373828928524201,
+ "loss": 0.9482,
+ "step": 1133
+ },
+ {
+ "epoch": 1.1343190272264074,
+ "grad_norm": 0.5477299094200134,
+ "learning_rate": 0.00013728564777803088,
+ "loss": 1.1119,
+ "step": 1134
+ },
+ {
+ "epoch": 1.1353193085555313,
+ "grad_norm": 0.5505563020706177,
+ "learning_rate": 0.00013718836173345783,
+ "loss": 1.0315,
+ "step": 1135
+ },
+ {
+ "epoch": 1.136319589884655,
+ "grad_norm": 0.5921071171760559,
+ "learning_rate": 0.00013709103482560078,
+ "loss": 0.98,
+ "step": 1136
+ },
+ {
+ "epoch": 1.1373198712137789,
+ "grad_norm": 0.4483082890510559,
+ "learning_rate": 0.00013699366716140435,
+ "loss": 0.9203,
+ "step": 1137
+ },
+ {
+ "epoch": 1.1383201525429028,
+ "grad_norm": 0.4304388165473938,
+ "learning_rate": 0.00013689625884785798,
+ "loss": 0.824,
+ "step": 1138
+ },
+ {
+ "epoch": 1.1393204338720264,
+ "grad_norm": 0.5273844003677368,
+ "learning_rate": 0.00013679880999199583,
+ "loss": 1.0061,
+ "step": 1139
+ },
+ {
+ "epoch": 1.1403207152011503,
+ "grad_norm": 0.5016499161720276,
+ "learning_rate": 0.00013670132070089653,
+ "loss": 0.8692,
+ "step": 1140
+ },
+ {
+ "epoch": 1.1413209965302742,
+ "grad_norm": 0.5045731067657471,
+ "learning_rate": 0.00013660379108168324,
+ "loss": 0.958,
+ "step": 1141
+ },
+ {
+ "epoch": 1.142321277859398,
+ "grad_norm": 0.484275221824646,
+ "learning_rate": 0.00013650622124152334,
+ "loss": 0.8589,
+ "step": 1142
+ },
+ {
+ "epoch": 1.1433215591885217,
+ "grad_norm": 0.6210789680480957,
+ "learning_rate": 0.0001364086112876284,
+ "loss": 0.931,
+ "step": 1143
+ },
+ {
+ "epoch": 1.1443218405176456,
+ "grad_norm": 0.59291011095047,
+ "learning_rate": 0.00013631096132725413,
+ "loss": 0.9706,
+ "step": 1144
+ },
+ {
+ "epoch": 1.1453221218467693,
+ "grad_norm": 0.48909759521484375,
+ "learning_rate": 0.00013621327146770025,
+ "loss": 0.9696,
+ "step": 1145
+ },
+ {
+ "epoch": 1.1463224031758932,
+ "grad_norm": 0.5022495985031128,
+ "learning_rate": 0.00013611554181631013,
+ "loss": 0.9349,
+ "step": 1146
+ },
+ {
+ "epoch": 1.147322684505017,
+ "grad_norm": 0.6155623197555542,
+ "learning_rate": 0.00013601777248047105,
+ "loss": 0.9161,
+ "step": 1147
+ },
+ {
+ "epoch": 1.148322965834141,
+ "grad_norm": 0.49372079968452454,
+ "learning_rate": 0.0001359199635676138,
+ "loss": 0.8598,
+ "step": 1148
+ },
+ {
+ "epoch": 1.1493232471632646,
+ "grad_norm": 0.504294753074646,
+ "learning_rate": 0.00013582211518521273,
+ "loss": 0.9334,
+ "step": 1149
+ },
+ {
+ "epoch": 1.1503235284923885,
+ "grad_norm": 0.44594088196754456,
+ "learning_rate": 0.00013572422744078551,
+ "loss": 1.0443,
+ "step": 1150
+ },
+ {
+ "epoch": 1.1513238098215124,
+ "grad_norm": 0.4689579904079437,
+ "learning_rate": 0.00013562630044189304,
+ "loss": 0.9192,
+ "step": 1151
+ },
+ {
+ "epoch": 1.152324091150636,
+ "grad_norm": 0.49370667338371277,
+ "learning_rate": 0.00013552833429613938,
+ "loss": 0.8638,
+ "step": 1152
+ },
+ {
+ "epoch": 1.15332437247976,
+ "grad_norm": 0.4459637403488159,
+ "learning_rate": 0.0001354303291111716,
+ "loss": 0.8719,
+ "step": 1153
+ },
+ {
+ "epoch": 1.1543246538088838,
+ "grad_norm": 0.41995370388031006,
+ "learning_rate": 0.0001353322849946797,
+ "loss": 0.9429,
+ "step": 1154
+ },
+ {
+ "epoch": 1.1553249351380075,
+ "grad_norm": 0.5358927249908447,
+ "learning_rate": 0.00013523420205439646,
+ "loss": 1.0724,
+ "step": 1155
+ },
+ {
+ "epoch": 1.1563252164671314,
+ "grad_norm": 0.48797738552093506,
+ "learning_rate": 0.0001351360803980972,
+ "loss": 1.0191,
+ "step": 1156
+ },
+ {
+ "epoch": 1.1573254977962553,
+ "grad_norm": 0.46079760789871216,
+ "learning_rate": 0.00013503792013359997,
+ "loss": 0.8731,
+ "step": 1157
+ },
+ {
+ "epoch": 1.158325779125379,
+ "grad_norm": 0.5278632044792175,
+ "learning_rate": 0.00013493972136876509,
+ "loss": 1.0689,
+ "step": 1158
+ },
+ {
+ "epoch": 1.1593260604545028,
+ "grad_norm": 0.6085927486419678,
+ "learning_rate": 0.00013484148421149527,
+ "loss": 1.0228,
+ "step": 1159
+ },
+ {
+ "epoch": 1.1603263417836267,
+ "grad_norm": 0.49424564838409424,
+ "learning_rate": 0.0001347432087697354,
+ "loss": 0.9622,
+ "step": 1160
+ },
+ {
+ "epoch": 1.1613266231127504,
+ "grad_norm": 0.4577535092830658,
+ "learning_rate": 0.00013464489515147238,
+ "loss": 0.795,
+ "step": 1161
+ },
+ {
+ "epoch": 1.1623269044418743,
+ "grad_norm": 0.5331981778144836,
+ "learning_rate": 0.0001345465434647351,
+ "loss": 1.2866,
+ "step": 1162
+ },
+ {
+ "epoch": 1.1633271857709981,
+ "grad_norm": 0.4657655954360962,
+ "learning_rate": 0.00013444815381759425,
+ "loss": 0.8171,
+ "step": 1163
+ },
+ {
+ "epoch": 1.1643274671001218,
+ "grad_norm": 0.44027647376060486,
+ "learning_rate": 0.00013434972631816235,
+ "loss": 0.9448,
+ "step": 1164
+ },
+ {
+ "epoch": 1.1653277484292457,
+ "grad_norm": 1.996617317199707,
+ "learning_rate": 0.0001342512610745933,
+ "loss": 0.8706,
+ "step": 1165
+ },
+ {
+ "epoch": 1.1663280297583696,
+ "grad_norm": 0.4826609790325165,
+ "learning_rate": 0.0001341527581950827,
+ "loss": 1.1075,
+ "step": 1166
+ },
+ {
+ "epoch": 1.1673283110874935,
+ "grad_norm": 0.4908469617366791,
+ "learning_rate": 0.00013405421778786737,
+ "loss": 0.835,
+ "step": 1167
+ },
+ {
+ "epoch": 1.1683285924166171,
+ "grad_norm": 0.5113404989242554,
+ "learning_rate": 0.00013395563996122537,
+ "loss": 0.8437,
+ "step": 1168
+ },
+ {
+ "epoch": 1.169328873745741,
+ "grad_norm": 0.5029433369636536,
+ "learning_rate": 0.00013385702482347593,
+ "loss": 1.1188,
+ "step": 1169
+ },
+ {
+ "epoch": 1.170329155074865,
+ "grad_norm": 0.4739987552165985,
+ "learning_rate": 0.00013375837248297926,
+ "loss": 0.9829,
+ "step": 1170
+ },
+ {
+ "epoch": 1.1713294364039886,
+ "grad_norm": 0.5853392481803894,
+ "learning_rate": 0.0001336596830481364,
+ "loss": 1.0384,
+ "step": 1171
+ },
+ {
+ "epoch": 1.1723297177331125,
+ "grad_norm": 0.5038638710975647,
+ "learning_rate": 0.0001335609566273892,
+ "loss": 0.9389,
+ "step": 1172
+ },
+ {
+ "epoch": 1.1733299990622363,
+ "grad_norm": 0.4367244243621826,
+ "learning_rate": 0.00013346219332922016,
+ "loss": 0.8182,
+ "step": 1173
+ },
+ {
+ "epoch": 1.17433028039136,
+ "grad_norm": 0.4453211724758148,
+ "learning_rate": 0.00013336339326215228,
+ "loss": 0.9289,
+ "step": 1174
+ },
+ {
+ "epoch": 1.175330561720484,
+ "grad_norm": 0.49941959977149963,
+ "learning_rate": 0.00013326455653474897,
+ "loss": 1.1277,
+ "step": 1175
+ },
+ {
+ "epoch": 1.1763308430496078,
+ "grad_norm": 0.553996205329895,
+ "learning_rate": 0.00013316568325561393,
+ "loss": 0.8582,
+ "step": 1176
+ },
+ {
+ "epoch": 1.1773311243787314,
+ "grad_norm": 0.5424408316612244,
+ "learning_rate": 0.00013306677353339098,
+ "loss": 1.0046,
+ "step": 1177
+ },
+ {
+ "epoch": 1.1783314057078553,
+ "grad_norm": 0.4373432695865631,
+ "learning_rate": 0.000132967827476764,
+ "loss": 0.9554,
+ "step": 1178
+ },
+ {
+ "epoch": 1.1793316870369792,
+ "grad_norm": 0.4744022786617279,
+ "learning_rate": 0.0001328688451944569,
+ "loss": 0.7784,
+ "step": 1179
+ },
+ {
+ "epoch": 1.1803319683661029,
+ "grad_norm": 0.5251059532165527,
+ "learning_rate": 0.00013276982679523322,
+ "loss": 0.8857,
+ "step": 1180
+ },
+ {
+ "epoch": 1.1813322496952268,
+ "grad_norm": 0.5108295679092407,
+ "learning_rate": 0.00013267077238789633,
+ "loss": 1.0711,
+ "step": 1181
+ },
+ {
+ "epoch": 1.1823325310243507,
+ "grad_norm": 0.49973955750465393,
+ "learning_rate": 0.00013257168208128908,
+ "loss": 1.0047,
+ "step": 1182
+ },
+ {
+ "epoch": 1.1833328123534743,
+ "grad_norm": 0.5143113732337952,
+ "learning_rate": 0.00013247255598429378,
+ "loss": 0.9294,
+ "step": 1183
+ },
+ {
+ "epoch": 1.1843330936825982,
+ "grad_norm": 0.5185163617134094,
+ "learning_rate": 0.00013237339420583212,
+ "loss": 0.9491,
+ "step": 1184
+ },
+ {
+ "epoch": 1.185333375011722,
+ "grad_norm": 0.49349021911621094,
+ "learning_rate": 0.00013227419685486492,
+ "loss": 0.812,
+ "step": 1185
+ },
+ {
+ "epoch": 1.186333656340846,
+ "grad_norm": 0.5210988521575928,
+ "learning_rate": 0.00013217496404039218,
+ "loss": 1.1228,
+ "step": 1186
+ },
+ {
+ "epoch": 1.1873339376699696,
+ "grad_norm": 0.46139585971832275,
+ "learning_rate": 0.0001320756958714528,
+ "loss": 0.7623,
+ "step": 1187
+ },
+ {
+ "epoch": 1.1883342189990935,
+ "grad_norm": 0.5365749597549438,
+ "learning_rate": 0.00013197639245712454,
+ "loss": 1.0785,
+ "step": 1188
+ },
+ {
+ "epoch": 1.1893345003282172,
+ "grad_norm": 0.4624418616294861,
+ "learning_rate": 0.00013187705390652388,
+ "loss": 1.0245,
+ "step": 1189
+ },
+ {
+ "epoch": 1.190334781657341,
+ "grad_norm": 0.4919735789299011,
+ "learning_rate": 0.00013177768032880593,
+ "loss": 0.9078,
+ "step": 1190
+ },
+ {
+ "epoch": 1.191335062986465,
+ "grad_norm": 0.5049088597297668,
+ "learning_rate": 0.0001316782718331643,
+ "loss": 0.8884,
+ "step": 1191
+ },
+ {
+ "epoch": 1.1923353443155889,
+ "grad_norm": 0.47496137022972107,
+ "learning_rate": 0.0001315788285288309,
+ "loss": 0.9414,
+ "step": 1192
+ },
+ {
+ "epoch": 1.1933356256447125,
+ "grad_norm": 0.4913059175014496,
+ "learning_rate": 0.00013147935052507597,
+ "loss": 0.8762,
+ "step": 1193
+ },
+ {
+ "epoch": 1.1943359069738364,
+ "grad_norm": 0.5643580555915833,
+ "learning_rate": 0.00013137983793120786,
+ "loss": 0.9556,
+ "step": 1194
+ },
+ {
+ "epoch": 1.1953361883029603,
+ "grad_norm": 0.5032216310501099,
+ "learning_rate": 0.0001312802908565729,
+ "loss": 1.1547,
+ "step": 1195
+ },
+ {
+ "epoch": 1.196336469632084,
+ "grad_norm": 0.5721387267112732,
+ "learning_rate": 0.0001311807094105553,
+ "loss": 0.97,
+ "step": 1196
+ },
+ {
+ "epoch": 1.1973367509612078,
+ "grad_norm": 0.47524675726890564,
+ "learning_rate": 0.00013108109370257712,
+ "loss": 0.9953,
+ "step": 1197
+ },
+ {
+ "epoch": 1.1983370322903317,
+ "grad_norm": 0.5769131183624268,
+ "learning_rate": 0.00013098144384209796,
+ "loss": 1.0578,
+ "step": 1198
+ },
+ {
+ "epoch": 1.1993373136194554,
+ "grad_norm": 0.4861721694469452,
+ "learning_rate": 0.000130881759938615,
+ "loss": 0.7542,
+ "step": 1199
+ },
+ {
+ "epoch": 1.2003375949485793,
+ "grad_norm": 0.4798511266708374,
+ "learning_rate": 0.00013078204210166278,
+ "loss": 0.9024,
+ "step": 1200
+ },
+ {
+ "epoch": 1.2013378762777032,
+ "grad_norm": 0.4447210729122162,
+ "learning_rate": 0.00013068229044081324,
+ "loss": 0.9703,
+ "step": 1201
+ },
+ {
+ "epoch": 1.2023381576068268,
+ "grad_norm": 0.5221365690231323,
+ "learning_rate": 0.0001305825050656754,
+ "loss": 1.0575,
+ "step": 1202
+ },
+ {
+ "epoch": 1.2033384389359507,
+ "grad_norm": 0.44786536693573,
+ "learning_rate": 0.00013048268608589533,
+ "loss": 0.9047,
+ "step": 1203
+ },
+ {
+ "epoch": 1.2043387202650746,
+ "grad_norm": 0.44534093141555786,
+ "learning_rate": 0.00013038283361115603,
+ "loss": 0.9156,
+ "step": 1204
+ },
+ {
+ "epoch": 1.2053390015941985,
+ "grad_norm": 0.5345563292503357,
+ "learning_rate": 0.0001302829477511773,
+ "loss": 0.9933,
+ "step": 1205
+ },
+ {
+ "epoch": 1.2063392829233222,
+ "grad_norm": 0.49175193905830383,
+ "learning_rate": 0.0001301830286157157,
+ "loss": 0.84,
+ "step": 1206
+ },
+ {
+ "epoch": 1.207339564252446,
+ "grad_norm": 0.5271350145339966,
+ "learning_rate": 0.0001300830763145642,
+ "loss": 0.8739,
+ "step": 1207
+ },
+ {
+ "epoch": 1.2083398455815697,
+ "grad_norm": 0.4891369342803955,
+ "learning_rate": 0.00012998309095755235,
+ "loss": 0.9923,
+ "step": 1208
+ },
+ {
+ "epoch": 1.2093401269106936,
+ "grad_norm": 0.44362354278564453,
+ "learning_rate": 0.00012988307265454597,
+ "loss": 0.911,
+ "step": 1209
+ },
+ {
+ "epoch": 1.2103404082398175,
+ "grad_norm": 0.46026211977005005,
+ "learning_rate": 0.0001297830215154471,
+ "loss": 0.8749,
+ "step": 1210
+ },
+ {
+ "epoch": 1.2113406895689414,
+ "grad_norm": 0.49236229062080383,
+ "learning_rate": 0.00012968293765019384,
+ "loss": 0.8959,
+ "step": 1211
+ },
+ {
+ "epoch": 1.212340970898065,
+ "grad_norm": 0.5326531529426575,
+ "learning_rate": 0.00012958282116876026,
+ "loss": 1.0464,
+ "step": 1212
+ },
+ {
+ "epoch": 1.213341252227189,
+ "grad_norm": 0.4658203721046448,
+ "learning_rate": 0.00012948267218115624,
+ "loss": 0.8895,
+ "step": 1213
+ },
+ {
+ "epoch": 1.2143415335563128,
+ "grad_norm": 0.5042040348052979,
+ "learning_rate": 0.00012938249079742743,
+ "loss": 0.889,
+ "step": 1214
+ },
+ {
+ "epoch": 1.2153418148854365,
+ "grad_norm": 0.5408799648284912,
+ "learning_rate": 0.00012928227712765504,
+ "loss": 0.9974,
+ "step": 1215
+ },
+ {
+ "epoch": 1.2163420962145604,
+ "grad_norm": 0.7056695818901062,
+ "learning_rate": 0.0001291820312819558,
+ "loss": 0.8644,
+ "step": 1216
+ },
+ {
+ "epoch": 1.2173423775436842,
+ "grad_norm": 0.5424172878265381,
+ "learning_rate": 0.00012908175337048174,
+ "loss": 1.0855,
+ "step": 1217
+ },
+ {
+ "epoch": 1.218342658872808,
+ "grad_norm": 0.4773527681827545,
+ "learning_rate": 0.00012898144350342015,
+ "loss": 1.014,
+ "step": 1218
+ },
+ {
+ "epoch": 1.2193429402019318,
+ "grad_norm": 0.5538880228996277,
+ "learning_rate": 0.0001288811017909934,
+ "loss": 1.0491,
+ "step": 1219
+ },
+ {
+ "epoch": 1.2203432215310557,
+ "grad_norm": 0.4497896730899811,
+ "learning_rate": 0.00012878072834345895,
+ "loss": 0.8591,
+ "step": 1220
+ },
+ {
+ "epoch": 1.2213435028601793,
+ "grad_norm": 0.5487242341041565,
+ "learning_rate": 0.00012868032327110904,
+ "loss": 0.9809,
+ "step": 1221
+ },
+ {
+ "epoch": 1.2223437841893032,
+ "grad_norm": 0.5900948643684387,
+ "learning_rate": 0.00012857988668427066,
+ "loss": 1.1435,
+ "step": 1222
+ },
+ {
+ "epoch": 1.2233440655184271,
+ "grad_norm": 0.5471523404121399,
+ "learning_rate": 0.0001284794186933055,
+ "loss": 1.0088,
+ "step": 1223
+ },
+ {
+ "epoch": 1.2243443468475508,
+ "grad_norm": 0.4625445604324341,
+ "learning_rate": 0.00012837891940860972,
+ "loss": 1.0452,
+ "step": 1224
+ },
+ {
+ "epoch": 1.2253446281766747,
+ "grad_norm": 0.4972693920135498,
+ "learning_rate": 0.00012827838894061377,
+ "loss": 1.0403,
+ "step": 1225
+ },
+ {
+ "epoch": 1.2263449095057986,
+ "grad_norm": 0.4823111295700073,
+ "learning_rate": 0.00012817782739978255,
+ "loss": 0.9439,
+ "step": 1226
+ },
+ {
+ "epoch": 1.2273451908349222,
+ "grad_norm": 0.5163894295692444,
+ "learning_rate": 0.00012807723489661495,
+ "loss": 1.031,
+ "step": 1227
+ },
+ {
+ "epoch": 1.228345472164046,
+ "grad_norm": 0.5085253119468689,
+ "learning_rate": 0.00012797661154164395,
+ "loss": 0.998,
+ "step": 1228
+ },
+ {
+ "epoch": 1.22934575349317,
+ "grad_norm": 0.4469011425971985,
+ "learning_rate": 0.00012787595744543647,
+ "loss": 0.8943,
+ "step": 1229
+ },
+ {
+ "epoch": 1.2303460348222939,
+ "grad_norm": 0.5117391347885132,
+ "learning_rate": 0.00012777527271859307,
+ "loss": 0.9817,
+ "step": 1230
+ },
+ {
+ "epoch": 1.2313463161514175,
+ "grad_norm": 0.44259950518608093,
+ "learning_rate": 0.0001276745574717481,
+ "loss": 0.7659,
+ "step": 1231
+ },
+ {
+ "epoch": 1.2323465974805414,
+ "grad_norm": 0.42978596687316895,
+ "learning_rate": 0.00012757381181556943,
+ "loss": 0.7313,
+ "step": 1232
+ },
+ {
+ "epoch": 1.2333468788096653,
+ "grad_norm": 0.5619105696678162,
+ "learning_rate": 0.0001274730358607583,
+ "loss": 0.9881,
+ "step": 1233
+ },
+ {
+ "epoch": 1.234347160138789,
+ "grad_norm": 0.5065141916275024,
+ "learning_rate": 0.00012737222971804924,
+ "loss": 0.9789,
+ "step": 1234
+ },
+ {
+ "epoch": 1.2353474414679129,
+ "grad_norm": 0.514705240726471,
+ "learning_rate": 0.00012727139349821,
+ "loss": 0.9278,
+ "step": 1235
+ },
+ {
+ "epoch": 1.2363477227970368,
+ "grad_norm": 0.48272448778152466,
+ "learning_rate": 0.0001271705273120413,
+ "loss": 0.9011,
+ "step": 1236
+ },
+ {
+ "epoch": 1.2373480041261604,
+ "grad_norm": 0.4993284344673157,
+ "learning_rate": 0.00012706963127037685,
+ "loss": 0.8341,
+ "step": 1237
+ },
+ {
+ "epoch": 1.2383482854552843,
+ "grad_norm": 0.44701850414276123,
+ "learning_rate": 0.00012696870548408316,
+ "loss": 0.8481,
+ "step": 1238
+ },
+ {
+ "epoch": 1.2393485667844082,
+ "grad_norm": 0.5611200332641602,
+ "learning_rate": 0.00012686775006405946,
+ "loss": 1.101,
+ "step": 1239
+ },
+ {
+ "epoch": 1.2403488481135319,
+ "grad_norm": 0.4962129592895508,
+ "learning_rate": 0.00012676676512123747,
+ "loss": 0.951,
+ "step": 1240
+ },
+ {
+ "epoch": 1.2413491294426557,
+ "grad_norm": 0.5547065734863281,
+ "learning_rate": 0.00012666575076658134,
+ "loss": 1.0228,
+ "step": 1241
+ },
+ {
+ "epoch": 1.2423494107717796,
+ "grad_norm": 0.5761319398880005,
+ "learning_rate": 0.00012656470711108764,
+ "loss": 1.0631,
+ "step": 1242
+ },
+ {
+ "epoch": 1.2433496921009033,
+ "grad_norm": 0.5202417969703674,
+ "learning_rate": 0.00012646363426578505,
+ "loss": 0.9623,
+ "step": 1243
+ },
+ {
+ "epoch": 1.2443499734300272,
+ "grad_norm": 0.561244547367096,
+ "learning_rate": 0.0001263625323417343,
+ "loss": 1.1666,
+ "step": 1244
+ },
+ {
+ "epoch": 1.245350254759151,
+ "grad_norm": 0.43389594554901123,
+ "learning_rate": 0.0001262614014500282,
+ "loss": 0.9473,
+ "step": 1245
+ },
+ {
+ "epoch": 1.2463505360882747,
+ "grad_norm": 0.5219054222106934,
+ "learning_rate": 0.00012616024170179126,
+ "loss": 1.0181,
+ "step": 1246
+ },
+ {
+ "epoch": 1.2473508174173986,
+ "grad_norm": 0.5179515480995178,
+ "learning_rate": 0.00012605905320817976,
+ "loss": 1.0851,
+ "step": 1247
+ },
+ {
+ "epoch": 1.2483510987465225,
+ "grad_norm": 0.5104801058769226,
+ "learning_rate": 0.00012595783608038155,
+ "loss": 0.9239,
+ "step": 1248
+ },
+ {
+ "epoch": 1.2493513800756464,
+ "grad_norm": 0.46918627619743347,
+ "learning_rate": 0.00012585659042961596,
+ "loss": 0.8361,
+ "step": 1249
+ },
+ {
+ "epoch": 1.25035166140477,
+ "grad_norm": 0.5275365710258484,
+ "learning_rate": 0.00012575531636713368,
+ "loss": 0.9256,
+ "step": 1250
+ },
+ {
+ "epoch": 1.251351942733894,
+ "grad_norm": 0.5006279349327087,
+ "learning_rate": 0.00012565401400421651,
+ "loss": 0.8748,
+ "step": 1251
+ },
+ {
+ "epoch": 1.2523522240630176,
+ "grad_norm": 0.466467022895813,
+ "learning_rate": 0.0001255526834521775,
+ "loss": 0.9217,
+ "step": 1252
+ },
+ {
+ "epoch": 1.2533525053921415,
+ "grad_norm": 0.45304587483406067,
+ "learning_rate": 0.00012545132482236055,
+ "loss": 0.8776,
+ "step": 1253
+ },
+ {
+ "epoch": 1.2543527867212654,
+ "grad_norm": 0.483394980430603,
+ "learning_rate": 0.0001253499382261405,
+ "loss": 0.9421,
+ "step": 1254
+ },
+ {
+ "epoch": 1.2553530680503893,
+ "grad_norm": 0.5117647051811218,
+ "learning_rate": 0.00012524852377492285,
+ "loss": 1.0033,
+ "step": 1255
+ },
+ {
+ "epoch": 1.256353349379513,
+ "grad_norm": 0.5712929964065552,
+ "learning_rate": 0.00012514708158014378,
+ "loss": 1.0216,
+ "step": 1256
+ },
+ {
+ "epoch": 1.2573536307086368,
+ "grad_norm": 0.49368858337402344,
+ "learning_rate": 0.00012504561175326985,
+ "loss": 0.8836,
+ "step": 1257
+ },
+ {
+ "epoch": 1.2583539120377607,
+ "grad_norm": 0.5303272008895874,
+ "learning_rate": 0.00012494411440579814,
+ "loss": 1.0138,
+ "step": 1258
+ },
+ {
+ "epoch": 1.2593541933668844,
+ "grad_norm": 0.47034743428230286,
+ "learning_rate": 0.0001248425896492558,
+ "loss": 0.9346,
+ "step": 1259
+ },
+ {
+ "epoch": 1.2603544746960083,
+ "grad_norm": 0.5398191809654236,
+ "learning_rate": 0.00012474103759520027,
+ "loss": 1.2548,
+ "step": 1260
+ },
+ {
+ "epoch": 1.2613547560251321,
+ "grad_norm": 0.4403116703033447,
+ "learning_rate": 0.00012463945835521878,
+ "loss": 0.8063,
+ "step": 1261
+ },
+ {
+ "epoch": 1.2623550373542558,
+ "grad_norm": 0.5504721999168396,
+ "learning_rate": 0.0001245378520409286,
+ "loss": 1.0888,
+ "step": 1262
+ },
+ {
+ "epoch": 1.2633553186833797,
+ "grad_norm": 0.46984589099884033,
+ "learning_rate": 0.0001244362187639767,
+ "loss": 0.9062,
+ "step": 1263
+ },
+ {
+ "epoch": 1.2643556000125036,
+ "grad_norm": 0.5573250651359558,
+ "learning_rate": 0.00012433455863603967,
+ "loss": 0.9474,
+ "step": 1264
+ },
+ {
+ "epoch": 1.2653558813416272,
+ "grad_norm": 0.5468732714653015,
+ "learning_rate": 0.00012423287176882358,
+ "loss": 0.9424,
+ "step": 1265
+ },
+ {
+ "epoch": 1.2663561626707511,
+ "grad_norm": 0.4921899437904358,
+ "learning_rate": 0.00012413115827406392,
+ "loss": 0.8568,
+ "step": 1266
+ },
+ {
+ "epoch": 1.267356443999875,
+ "grad_norm": 0.48769402503967285,
+ "learning_rate": 0.00012402941826352546,
+ "loss": 0.7579,
+ "step": 1267
+ },
+ {
+ "epoch": 1.268356725328999,
+ "grad_norm": 0.5462141633033752,
+ "learning_rate": 0.00012392765184900202,
+ "loss": 0.9946,
+ "step": 1268
+ },
+ {
+ "epoch": 1.2693570066581226,
+ "grad_norm": 0.5021050572395325,
+ "learning_rate": 0.0001238258591423165,
+ "loss": 0.8603,
+ "step": 1269
+ },
+ {
+ "epoch": 1.2703572879872465,
+ "grad_norm": 0.5272159576416016,
+ "learning_rate": 0.00012372404025532072,
+ "loss": 0.94,
+ "step": 1270
+ },
+ {
+ "epoch": 1.2713575693163701,
+ "grad_norm": 0.5332500338554382,
+ "learning_rate": 0.00012362219529989514,
+ "loss": 1.1609,
+ "step": 1271
+ },
+ {
+ "epoch": 1.272357850645494,
+ "grad_norm": 0.5058136582374573,
+ "learning_rate": 0.00012352032438794902,
+ "loss": 1.0013,
+ "step": 1272
+ },
+ {
+ "epoch": 1.273358131974618,
+ "grad_norm": 0.5055596828460693,
+ "learning_rate": 0.00012341842763142005,
+ "loss": 1.0121,
+ "step": 1273
+ },
+ {
+ "epoch": 1.2743584133037418,
+ "grad_norm": 0.5699402689933777,
+ "learning_rate": 0.00012331650514227425,
+ "loss": 1.1188,
+ "step": 1274
+ },
+ {
+ "epoch": 1.2753586946328654,
+ "grad_norm": 0.511233925819397,
+ "learning_rate": 0.00012321455703250616,
+ "loss": 1.0291,
+ "step": 1275
+ },
+ {
+ "epoch": 1.2763589759619893,
+ "grad_norm": 0.5304299592971802,
+ "learning_rate": 0.00012311258341413822,
+ "loss": 0.9619,
+ "step": 1276
+ },
+ {
+ "epoch": 1.277359257291113,
+ "grad_norm": 0.5318915247917175,
+ "learning_rate": 0.00012301058439922102,
+ "loss": 0.9669,
+ "step": 1277
+ },
+ {
+ "epoch": 1.2783595386202369,
+ "grad_norm": 0.510267436504364,
+ "learning_rate": 0.000122908560099833,
+ "loss": 1.0956,
+ "step": 1278
+ },
+ {
+ "epoch": 1.2793598199493608,
+ "grad_norm": 0.530360758304596,
+ "learning_rate": 0.00012280651062808047,
+ "loss": 1.02,
+ "step": 1279
+ },
+ {
+ "epoch": 1.2803601012784847,
+ "grad_norm": 0.5094459056854248,
+ "learning_rate": 0.00012270443609609729,
+ "loss": 0.9614,
+ "step": 1280
+ },
+ {
+ "epoch": 1.2813603826076083,
+ "grad_norm": 0.4430864453315735,
+ "learning_rate": 0.0001226023366160449,
+ "loss": 0.8188,
+ "step": 1281
+ },
+ {
+ "epoch": 1.2823606639367322,
+ "grad_norm": 0.4705411493778229,
+ "learning_rate": 0.00012250021230011225,
+ "loss": 0.8952,
+ "step": 1282
+ },
+ {
+ "epoch": 1.283360945265856,
+ "grad_norm": 0.5231715440750122,
+ "learning_rate": 0.00012239806326051539,
+ "loss": 0.941,
+ "step": 1283
+ },
+ {
+ "epoch": 1.2843612265949798,
+ "grad_norm": 0.5658493041992188,
+ "learning_rate": 0.00012229588960949771,
+ "loss": 1.0047,
+ "step": 1284
+ },
+ {
+ "epoch": 1.2853615079241036,
+ "grad_norm": 0.6016567349433899,
+ "learning_rate": 0.00012219369145932959,
+ "loss": 1.1764,
+ "step": 1285
+ },
+ {
+ "epoch": 1.2863617892532275,
+ "grad_norm": 0.6365408301353455,
+ "learning_rate": 0.00012209146892230822,
+ "loss": 0.9777,
+ "step": 1286
+ },
+ {
+ "epoch": 1.2873620705823514,
+ "grad_norm": 0.46536219120025635,
+ "learning_rate": 0.00012198922211075778,
+ "loss": 0.9826,
+ "step": 1287
+ },
+ {
+ "epoch": 1.288362351911475,
+ "grad_norm": 0.5130245089530945,
+ "learning_rate": 0.00012188695113702896,
+ "loss": 1.0255,
+ "step": 1288
+ },
+ {
+ "epoch": 1.289362633240599,
+ "grad_norm": 0.5321043133735657,
+ "learning_rate": 0.00012178465611349911,
+ "loss": 0.9973,
+ "step": 1289
+ },
+ {
+ "epoch": 1.2903629145697226,
+ "grad_norm": 0.48580724000930786,
+ "learning_rate": 0.00012168233715257194,
+ "loss": 0.8768,
+ "step": 1290
+ },
+ {
+ "epoch": 1.2913631958988465,
+ "grad_norm": 0.5140405297279358,
+ "learning_rate": 0.00012157999436667747,
+ "loss": 0.8985,
+ "step": 1291
+ },
+ {
+ "epoch": 1.2923634772279704,
+ "grad_norm": 0.4582030773162842,
+ "learning_rate": 0.00012147762786827193,
+ "loss": 0.9693,
+ "step": 1292
+ },
+ {
+ "epoch": 1.2933637585570943,
+ "grad_norm": 0.47397539019584656,
+ "learning_rate": 0.00012137523776983757,
+ "loss": 0.8348,
+ "step": 1293
+ },
+ {
+ "epoch": 1.294364039886218,
+ "grad_norm": 0.43932002782821655,
+ "learning_rate": 0.00012127282418388264,
+ "loss": 0.851,
+ "step": 1294
+ },
+ {
+ "epoch": 1.2953643212153418,
+ "grad_norm": 0.5559205412864685,
+ "learning_rate": 0.0001211703872229411,
+ "loss": 0.86,
+ "step": 1295
+ },
+ {
+ "epoch": 1.2963646025444655,
+ "grad_norm": 0.5433980226516724,
+ "learning_rate": 0.00012106792699957263,
+ "loss": 1.1181,
+ "step": 1296
+ },
+ {
+ "epoch": 1.2973648838735894,
+ "grad_norm": 0.5069502592086792,
+ "learning_rate": 0.00012096544362636255,
+ "loss": 0.9613,
+ "step": 1297
+ },
+ {
+ "epoch": 1.2983651652027133,
+ "grad_norm": 0.5588079690933228,
+ "learning_rate": 0.00012086293721592152,
+ "loss": 1.0741,
+ "step": 1298
+ },
+ {
+ "epoch": 1.2993654465318372,
+ "grad_norm": 0.6035181879997253,
+ "learning_rate": 0.00012076040788088554,
+ "loss": 1.0187,
+ "step": 1299
+ },
+ {
+ "epoch": 1.3003657278609608,
+ "grad_norm": 0.4385228455066681,
+ "learning_rate": 0.00012065785573391581,
+ "loss": 0.9293,
+ "step": 1300
+ },
+ {
+ "epoch": 1.3013660091900847,
+ "grad_norm": 0.5284578800201416,
+ "learning_rate": 0.00012055528088769861,
+ "loss": 0.9479,
+ "step": 1301
+ },
+ {
+ "epoch": 1.3023662905192086,
+ "grad_norm": 0.46655789017677307,
+ "learning_rate": 0.00012045268345494511,
+ "loss": 0.8702,
+ "step": 1302
+ },
+ {
+ "epoch": 1.3033665718483323,
+ "grad_norm": 0.5073155164718628,
+ "learning_rate": 0.00012035006354839133,
+ "loss": 0.8667,
+ "step": 1303
+ },
+ {
+ "epoch": 1.3043668531774562,
+ "grad_norm": 0.5954610109329224,
+ "learning_rate": 0.00012024742128079805,
+ "loss": 1.0998,
+ "step": 1304
+ },
+ {
+ "epoch": 1.30536713450658,
+ "grad_norm": 0.46617114543914795,
+ "learning_rate": 0.00012014475676495052,
+ "loss": 0.8853,
+ "step": 1305
+ },
+ {
+ "epoch": 1.306367415835704,
+ "grad_norm": 0.5705167055130005,
+ "learning_rate": 0.00012004207011365849,
+ "loss": 0.9094,
+ "step": 1306
+ },
+ {
+ "epoch": 1.3073676971648276,
+ "grad_norm": 0.4711546301841736,
+ "learning_rate": 0.00011993936143975599,
+ "loss": 0.9597,
+ "step": 1307
+ },
+ {
+ "epoch": 1.3083679784939515,
+ "grad_norm": 0.5322745442390442,
+ "learning_rate": 0.00011983663085610131,
+ "loss": 0.9221,
+ "step": 1308
+ },
+ {
+ "epoch": 1.3093682598230751,
+ "grad_norm": 0.4769452214241028,
+ "learning_rate": 0.00011973387847557676,
+ "loss": 0.7874,
+ "step": 1309
+ },
+ {
+ "epoch": 1.310368541152199,
+ "grad_norm": 0.5224636793136597,
+ "learning_rate": 0.00011963110441108863,
+ "loss": 0.8233,
+ "step": 1310
+ },
+ {
+ "epoch": 1.311368822481323,
+ "grad_norm": 0.5125696063041687,
+ "learning_rate": 0.000119528308775567,
+ "loss": 0.9894,
+ "step": 1311
+ },
+ {
+ "epoch": 1.3123691038104468,
+ "grad_norm": 0.5573001503944397,
+ "learning_rate": 0.00011942549168196575,
+ "loss": 0.9043,
+ "step": 1312
+ },
+ {
+ "epoch": 1.3133693851395705,
+ "grad_norm": 0.5493408441543579,
+ "learning_rate": 0.00011932265324326221,
+ "loss": 0.964,
+ "step": 1313
+ },
+ {
+ "epoch": 1.3143696664686944,
+ "grad_norm": 0.5327842235565186,
+ "learning_rate": 0.0001192197935724573,
+ "loss": 0.9196,
+ "step": 1314
+ },
+ {
+ "epoch": 1.315369947797818,
+ "grad_norm": 0.5743328332901001,
+ "learning_rate": 0.00011911691278257511,
+ "loss": 1.0504,
+ "step": 1315
+ },
+ {
+ "epoch": 1.316370229126942,
+ "grad_norm": 0.446932315826416,
+ "learning_rate": 0.0001190140109866631,
+ "loss": 0.8425,
+ "step": 1316
+ },
+ {
+ "epoch": 1.3173705104560658,
+ "grad_norm": 0.47306087613105774,
+ "learning_rate": 0.00011891108829779165,
+ "loss": 0.8726,
+ "step": 1317
+ },
+ {
+ "epoch": 1.3183707917851897,
+ "grad_norm": 0.566939115524292,
+ "learning_rate": 0.00011880814482905422,
+ "loss": 0.8747,
+ "step": 1318
+ },
+ {
+ "epoch": 1.3193710731143133,
+ "grad_norm": 0.5145870447158813,
+ "learning_rate": 0.00011870518069356709,
+ "loss": 0.9383,
+ "step": 1319
+ },
+ {
+ "epoch": 1.3203713544434372,
+ "grad_norm": 0.5228437185287476,
+ "learning_rate": 0.0001186021960044692,
+ "loss": 1.103,
+ "step": 1320
+ },
+ {
+ "epoch": 1.3213716357725611,
+ "grad_norm": 0.4844512939453125,
+ "learning_rate": 0.00011849919087492211,
+ "loss": 0.98,
+ "step": 1321
+ },
+ {
+ "epoch": 1.3223719171016848,
+ "grad_norm": 0.5099167227745056,
+ "learning_rate": 0.00011839616541810983,
+ "loss": 0.9023,
+ "step": 1322
+ },
+ {
+ "epoch": 1.3233721984308087,
+ "grad_norm": 0.4702555537223816,
+ "learning_rate": 0.00011829311974723867,
+ "loss": 0.8553,
+ "step": 1323
+ },
+ {
+ "epoch": 1.3243724797599326,
+ "grad_norm": 0.5219053030014038,
+ "learning_rate": 0.00011819005397553723,
+ "loss": 0.9446,
+ "step": 1324
+ },
+ {
+ "epoch": 1.3253727610890562,
+ "grad_norm": 0.48462843894958496,
+ "learning_rate": 0.00011808696821625613,
+ "loss": 0.9591,
+ "step": 1325
+ },
+ {
+ "epoch": 1.32637304241818,
+ "grad_norm": 0.5187227725982666,
+ "learning_rate": 0.000117983862582668,
+ "loss": 0.9413,
+ "step": 1326
+ },
+ {
+ "epoch": 1.327373323747304,
+ "grad_norm": 0.47444605827331543,
+ "learning_rate": 0.00011788073718806725,
+ "loss": 0.8979,
+ "step": 1327
+ },
+ {
+ "epoch": 1.3283736050764277,
+ "grad_norm": 0.5251137018203735,
+ "learning_rate": 0.00011777759214577006,
+ "loss": 1.0449,
+ "step": 1328
+ },
+ {
+ "epoch": 1.3293738864055515,
+ "grad_norm": 0.5007866024971008,
+ "learning_rate": 0.00011767442756911417,
+ "loss": 0.9907,
+ "step": 1329
+ },
+ {
+ "epoch": 1.3303741677346754,
+ "grad_norm": 0.8486194610595703,
+ "learning_rate": 0.00011757124357145881,
+ "loss": 1.0459,
+ "step": 1330
+ },
+ {
+ "epoch": 1.3313744490637993,
+ "grad_norm": 0.5153964161872864,
+ "learning_rate": 0.00011746804026618452,
+ "loss": 0.9911,
+ "step": 1331
+ },
+ {
+ "epoch": 1.332374730392923,
+ "grad_norm": 0.523077666759491,
+ "learning_rate": 0.00011736481776669306,
+ "loss": 1.0571,
+ "step": 1332
+ },
+ {
+ "epoch": 1.3333750117220469,
+ "grad_norm": 0.5242265462875366,
+ "learning_rate": 0.00011726157618640728,
+ "loss": 0.9057,
+ "step": 1333
+ },
+ {
+ "epoch": 1.3343752930511705,
+ "grad_norm": 0.524046778678894,
+ "learning_rate": 0.00011715831563877104,
+ "loss": 1.0413,
+ "step": 1334
+ },
+ {
+ "epoch": 1.3353755743802944,
+ "grad_norm": 0.5873232483863831,
+ "learning_rate": 0.00011705503623724898,
+ "loss": 1.1105,
+ "step": 1335
+ },
+ {
+ "epoch": 1.3363758557094183,
+ "grad_norm": 0.5559434294700623,
+ "learning_rate": 0.00011695173809532652,
+ "loss": 0.9045,
+ "step": 1336
+ },
+ {
+ "epoch": 1.3373761370385422,
+ "grad_norm": 0.5970155000686646,
+ "learning_rate": 0.00011684842132650957,
+ "loss": 1.1663,
+ "step": 1337
+ },
+ {
+ "epoch": 1.3383764183676659,
+ "grad_norm": 0.5005142092704773,
+ "learning_rate": 0.00011674508604432464,
+ "loss": 1.0695,
+ "step": 1338
+ },
+ {
+ "epoch": 1.3393766996967897,
+ "grad_norm": 0.49226582050323486,
+ "learning_rate": 0.00011664173236231848,
+ "loss": 1.0875,
+ "step": 1339
+ },
+ {
+ "epoch": 1.3403769810259134,
+ "grad_norm": 0.4792287349700928,
+ "learning_rate": 0.0001165383603940581,
+ "loss": 0.9102,
+ "step": 1340
+ },
+ {
+ "epoch": 1.3413772623550373,
+ "grad_norm": 0.4332147538661957,
+ "learning_rate": 0.00011643497025313061,
+ "loss": 0.8948,
+ "step": 1341
+ },
+ {
+ "epoch": 1.3423775436841612,
+ "grad_norm": 0.45502984523773193,
+ "learning_rate": 0.00011633156205314309,
+ "loss": 0.8538,
+ "step": 1342
+ },
+ {
+ "epoch": 1.343377825013285,
+ "grad_norm": 0.5594006776809692,
+ "learning_rate": 0.00011622813590772244,
+ "loss": 1.0178,
+ "step": 1343
+ },
+ {
+ "epoch": 1.3443781063424087,
+ "grad_norm": 0.4428876042366028,
+ "learning_rate": 0.00011612469193051525,
+ "loss": 0.856,
+ "step": 1344
+ },
+ {
+ "epoch": 1.3453783876715326,
+ "grad_norm": 0.4615425169467926,
+ "learning_rate": 0.00011602123023518779,
+ "loss": 0.8568,
+ "step": 1345
+ },
+ {
+ "epoch": 1.3463786690006565,
+ "grad_norm": 0.543389618396759,
+ "learning_rate": 0.00011591775093542572,
+ "loss": 0.8293,
+ "step": 1346
+ },
+ {
+ "epoch": 1.3473789503297802,
+ "grad_norm": 0.4740433394908905,
+ "learning_rate": 0.0001158142541449341,
+ "loss": 0.9163,
+ "step": 1347
+ },
+ {
+ "epoch": 1.348379231658904,
+ "grad_norm": 0.47938287258148193,
+ "learning_rate": 0.00011571073997743716,
+ "loss": 0.9745,
+ "step": 1348
+ },
+ {
+ "epoch": 1.349379512988028,
+ "grad_norm": 0.47510263323783875,
+ "learning_rate": 0.0001156072085466783,
+ "loss": 0.9536,
+ "step": 1349
+ },
+ {
+ "epoch": 1.3503797943171518,
+ "grad_norm": 0.5921860933303833,
+ "learning_rate": 0.00011550365996641979,
+ "loss": 0.8397,
+ "step": 1350
+ },
+ {
+ "epoch": 1.3513800756462755,
+ "grad_norm": 0.5436375737190247,
+ "learning_rate": 0.00011540009435044281,
+ "loss": 0.9381,
+ "step": 1351
+ },
+ {
+ "epoch": 1.3523803569753994,
+ "grad_norm": 0.4591434597969055,
+ "learning_rate": 0.00011529651181254723,
+ "loss": 1.0771,
+ "step": 1352
+ },
+ {
+ "epoch": 1.353380638304523,
+ "grad_norm": 0.533069372177124,
+ "learning_rate": 0.0001151929124665516,
+ "loss": 0.9103,
+ "step": 1353
+ },
+ {
+ "epoch": 1.354380919633647,
+ "grad_norm": 0.538324773311615,
+ "learning_rate": 0.00011508929642629274,
+ "loss": 1.0469,
+ "step": 1354
+ },
+ {
+ "epoch": 1.3553812009627708,
+ "grad_norm": 0.46198832988739014,
+ "learning_rate": 0.00011498566380562601,
+ "loss": 0.8242,
+ "step": 1355
+ },
+ {
+ "epoch": 1.3563814822918947,
+ "grad_norm": 0.573716402053833,
+ "learning_rate": 0.0001148820147184249,
+ "loss": 0.9437,
+ "step": 1356
+ },
+ {
+ "epoch": 1.3573817636210184,
+ "grad_norm": 0.5638802647590637,
+ "learning_rate": 0.00011477834927858104,
+ "loss": 0.9336,
+ "step": 1357
+ },
+ {
+ "epoch": 1.3583820449501423,
+ "grad_norm": 0.48780402541160583,
+ "learning_rate": 0.00011467466760000399,
+ "loss": 0.8859,
+ "step": 1358
+ },
+ {
+ "epoch": 1.359382326279266,
+ "grad_norm": 0.5441538095474243,
+ "learning_rate": 0.00011457096979662114,
+ "loss": 0.8804,
+ "step": 1359
+ },
+ {
+ "epoch": 1.3603826076083898,
+ "grad_norm": 0.5250831842422485,
+ "learning_rate": 0.00011446725598237767,
+ "loss": 0.9739,
+ "step": 1360
+ },
+ {
+ "epoch": 1.3613828889375137,
+ "grad_norm": 0.49177756905555725,
+ "learning_rate": 0.00011436352627123623,
+ "loss": 0.9586,
+ "step": 1361
+ },
+ {
+ "epoch": 1.3623831702666376,
+ "grad_norm": 0.5866628885269165,
+ "learning_rate": 0.00011425978077717709,
+ "loss": 1.0511,
+ "step": 1362
+ },
+ {
+ "epoch": 1.3633834515957612,
+ "grad_norm": 0.49350351095199585,
+ "learning_rate": 0.00011415601961419775,
+ "loss": 0.9637,
+ "step": 1363
+ },
+ {
+ "epoch": 1.3643837329248851,
+ "grad_norm": 0.5402287244796753,
+ "learning_rate": 0.00011405224289631295,
+ "loss": 1.0008,
+ "step": 1364
+ },
+ {
+ "epoch": 1.365384014254009,
+ "grad_norm": 0.5524907112121582,
+ "learning_rate": 0.00011394845073755455,
+ "loss": 1.0398,
+ "step": 1365
+ },
+ {
+ "epoch": 1.3663842955831327,
+ "grad_norm": 0.49948206543922424,
+ "learning_rate": 0.0001138446432519714,
+ "loss": 0.8577,
+ "step": 1366
+ },
+ {
+ "epoch": 1.3673845769122566,
+ "grad_norm": 0.500592052936554,
+ "learning_rate": 0.00011374082055362909,
+ "loss": 1.0053,
+ "step": 1367
+ },
+ {
+ "epoch": 1.3683848582413805,
+ "grad_norm": 0.4469926357269287,
+ "learning_rate": 0.00011363698275661001,
+ "loss": 0.8081,
+ "step": 1368
+ },
+ {
+ "epoch": 1.3693851395705043,
+ "grad_norm": 0.4939117431640625,
+ "learning_rate": 0.00011353312997501313,
+ "loss": 0.9559,
+ "step": 1369
+ },
+ {
+ "epoch": 1.370385420899628,
+ "grad_norm": 0.5091076493263245,
+ "learning_rate": 0.00011342926232295386,
+ "loss": 0.8962,
+ "step": 1370
+ },
+ {
+ "epoch": 1.371385702228752,
+ "grad_norm": 0.48055970668792725,
+ "learning_rate": 0.00011332537991456398,
+ "loss": 0.8686,
+ "step": 1371
+ },
+ {
+ "epoch": 1.3723859835578756,
+ "grad_norm": 0.4724258482456207,
+ "learning_rate": 0.00011322148286399147,
+ "loss": 0.8872,
+ "step": 1372
+ },
+ {
+ "epoch": 1.3733862648869994,
+ "grad_norm": 0.4945514500141144,
+ "learning_rate": 0.0001131175712854004,
+ "loss": 0.8766,
+ "step": 1373
+ },
+ {
+ "epoch": 1.3743865462161233,
+ "grad_norm": 0.4784204065799713,
+ "learning_rate": 0.00011301364529297079,
+ "loss": 0.8216,
+ "step": 1374
+ },
+ {
+ "epoch": 1.3753868275452472,
+ "grad_norm": 0.4669654667377472,
+ "learning_rate": 0.0001129097050008985,
+ "loss": 0.98,
+ "step": 1375
+ },
+ {
+ "epoch": 1.3763871088743709,
+ "grad_norm": 0.5275737047195435,
+ "learning_rate": 0.00011280575052339514,
+ "loss": 0.9391,
+ "step": 1376
+ },
+ {
+ "epoch": 1.3773873902034948,
+ "grad_norm": 0.47577112913131714,
+ "learning_rate": 0.00011270178197468789,
+ "loss": 0.8956,
+ "step": 1377
+ },
+ {
+ "epoch": 1.3783876715326184,
+ "grad_norm": 0.49086448550224304,
+ "learning_rate": 0.00011259779946901934,
+ "loss": 1.0058,
+ "step": 1378
+ },
+ {
+ "epoch": 1.3793879528617423,
+ "grad_norm": 0.5351247191429138,
+ "learning_rate": 0.0001124938031206475,
+ "loss": 1.0215,
+ "step": 1379
+ },
+ {
+ "epoch": 1.3803882341908662,
+ "grad_norm": 0.5512630343437195,
+ "learning_rate": 0.00011238979304384554,
+ "loss": 1.0254,
+ "step": 1380
+ },
+ {
+ "epoch": 1.38138851551999,
+ "grad_norm": 0.5598354339599609,
+ "learning_rate": 0.0001122857693529017,
+ "loss": 0.8707,
+ "step": 1381
+ },
+ {
+ "epoch": 1.3823887968491138,
+ "grad_norm": 0.5506719946861267,
+ "learning_rate": 0.0001121817321621192,
+ "loss": 0.9061,
+ "step": 1382
+ },
+ {
+ "epoch": 1.3833890781782376,
+ "grad_norm": 0.5244742035865784,
+ "learning_rate": 0.00011207768158581613,
+ "loss": 1.0017,
+ "step": 1383
+ },
+ {
+ "epoch": 1.3843893595073615,
+ "grad_norm": 0.480194091796875,
+ "learning_rate": 0.00011197361773832525,
+ "loss": 0.8132,
+ "step": 1384
+ },
+ {
+ "epoch": 1.3853896408364852,
+ "grad_norm": 0.5409587025642395,
+ "learning_rate": 0.00011186954073399387,
+ "loss": 1.0724,
+ "step": 1385
+ },
+ {
+ "epoch": 1.386389922165609,
+ "grad_norm": 0.5776751041412354,
+ "learning_rate": 0.00011176545068718385,
+ "loss": 0.9577,
+ "step": 1386
+ },
+ {
+ "epoch": 1.387390203494733,
+ "grad_norm": 0.4478171765804291,
+ "learning_rate": 0.0001116613477122713,
+ "loss": 0.7698,
+ "step": 1387
+ },
+ {
+ "epoch": 1.3883904848238566,
+ "grad_norm": 0.5580281615257263,
+ "learning_rate": 0.00011155723192364658,
+ "loss": 1.0065,
+ "step": 1388
+ },
+ {
+ "epoch": 1.3893907661529805,
+ "grad_norm": 0.5318020582199097,
+ "learning_rate": 0.00011145310343571411,
+ "loss": 0.9155,
+ "step": 1389
+ },
+ {
+ "epoch": 1.3903910474821044,
+ "grad_norm": 0.45960649847984314,
+ "learning_rate": 0.00011134896236289224,
+ "loss": 0.848,
+ "step": 1390
+ },
+ {
+ "epoch": 1.391391328811228,
+ "grad_norm": 0.49986693263053894,
+ "learning_rate": 0.0001112448088196132,
+ "loss": 1.0222,
+ "step": 1391
+ },
+ {
+ "epoch": 1.392391610140352,
+ "grad_norm": 0.6470636129379272,
+ "learning_rate": 0.00011114064292032282,
+ "loss": 0.8976,
+ "step": 1392
+ },
+ {
+ "epoch": 1.3933918914694758,
+ "grad_norm": 0.49885210394859314,
+ "learning_rate": 0.0001110364647794807,
+ "loss": 0.8872,
+ "step": 1393
+ },
+ {
+ "epoch": 1.3943921727985997,
+ "grad_norm": 0.48183003067970276,
+ "learning_rate": 0.00011093227451155974,
+ "loss": 0.7506,
+ "step": 1394
+ },
+ {
+ "epoch": 1.3953924541277234,
+ "grad_norm": 0.47776031494140625,
+ "learning_rate": 0.0001108280722310462,
+ "loss": 0.9945,
+ "step": 1395
+ },
+ {
+ "epoch": 1.3963927354568473,
+ "grad_norm": 0.5032552480697632,
+ "learning_rate": 0.0001107238580524395,
+ "loss": 0.9844,
+ "step": 1396
+ },
+ {
+ "epoch": 1.397393016785971,
+ "grad_norm": 0.5641827583312988,
+ "learning_rate": 0.00011061963209025223,
+ "loss": 0.9862,
+ "step": 1397
+ },
+ {
+ "epoch": 1.3983932981150948,
+ "grad_norm": 0.45950955152511597,
+ "learning_rate": 0.00011051539445900983,
+ "loss": 0.9878,
+ "step": 1398
+ },
+ {
+ "epoch": 1.3993935794442187,
+ "grad_norm": 0.48625022172927856,
+ "learning_rate": 0.00011041114527325065,
+ "loss": 0.9446,
+ "step": 1399
+ },
+ {
+ "epoch": 1.4003938607733426,
+ "grad_norm": 0.5851911902427673,
+ "learning_rate": 0.00011030688464752566,
+ "loss": 1.1538,
+ "step": 1400
+ },
+ {
+ "epoch": 1.4013941421024663,
+ "grad_norm": 0.45012837648391724,
+ "learning_rate": 0.00011020261269639842,
+ "loss": 0.8871,
+ "step": 1401
+ },
+ {
+ "epoch": 1.4023944234315902,
+ "grad_norm": 0.4794975221157074,
+ "learning_rate": 0.000110098329534445,
+ "loss": 0.912,
+ "step": 1402
+ },
+ {
+ "epoch": 1.4033947047607138,
+ "grad_norm": 0.5397909879684448,
+ "learning_rate": 0.00010999403527625367,
+ "loss": 1.015,
+ "step": 1403
+ },
+ {
+ "epoch": 1.4043949860898377,
+ "grad_norm": 0.5413039922714233,
+ "learning_rate": 0.00010988973003642499,
+ "loss": 1.0111,
+ "step": 1404
+ },
+ {
+ "epoch": 1.4053952674189616,
+ "grad_norm": 0.48752084374427795,
+ "learning_rate": 0.00010978541392957156,
+ "loss": 0.8649,
+ "step": 1405
+ },
+ {
+ "epoch": 1.4063955487480855,
+ "grad_norm": 0.5576539635658264,
+ "learning_rate": 0.00010968108707031792,
+ "loss": 0.8334,
+ "step": 1406
+ },
+ {
+ "epoch": 1.4073958300772091,
+ "grad_norm": 0.5292769074440002,
+ "learning_rate": 0.00010957674957330042,
+ "loss": 1.0312,
+ "step": 1407
+ },
+ {
+ "epoch": 1.408396111406333,
+ "grad_norm": 0.5971432328224182,
+ "learning_rate": 0.00010947240155316707,
+ "loss": 0.9367,
+ "step": 1408
+ },
+ {
+ "epoch": 1.409396392735457,
+ "grad_norm": 0.5620018839836121,
+ "learning_rate": 0.00010936804312457749,
+ "loss": 0.9493,
+ "step": 1409
+ },
+ {
+ "epoch": 1.4103966740645806,
+ "grad_norm": 0.456496000289917,
+ "learning_rate": 0.00010926367440220276,
+ "loss": 0.8532,
+ "step": 1410
+ },
+ {
+ "epoch": 1.4113969553937045,
+ "grad_norm": 0.47393882274627686,
+ "learning_rate": 0.00010915929550072517,
+ "loss": 0.8073,
+ "step": 1411
+ },
+ {
+ "epoch": 1.4123972367228284,
+ "grad_norm": 0.5321446061134338,
+ "learning_rate": 0.00010905490653483827,
+ "loss": 1.1076,
+ "step": 1412
+ },
+ {
+ "epoch": 1.4133975180519522,
+ "grad_norm": 0.4768468141555786,
+ "learning_rate": 0.00010895050761924668,
+ "loss": 0.9466,
+ "step": 1413
+ },
+ {
+ "epoch": 1.414397799381076,
+ "grad_norm": 0.5629300475120544,
+ "learning_rate": 0.00010884609886866588,
+ "loss": 1.0541,
+ "step": 1414
+ },
+ {
+ "epoch": 1.4153980807101998,
+ "grad_norm": 0.45907631516456604,
+ "learning_rate": 0.00010874168039782227,
+ "loss": 0.9156,
+ "step": 1415
+ },
+ {
+ "epoch": 1.4163983620393235,
+ "grad_norm": 0.5152727961540222,
+ "learning_rate": 0.00010863725232145286,
+ "loss": 1.0495,
+ "step": 1416
+ },
+ {
+ "epoch": 1.4173986433684473,
+ "grad_norm": 0.511647641658783,
+ "learning_rate": 0.00010853281475430517,
+ "loss": 0.7327,
+ "step": 1417
+ },
+ {
+ "epoch": 1.4183989246975712,
+ "grad_norm": 0.6430179476737976,
+ "learning_rate": 0.0001084283678111372,
+ "loss": 0.9831,
+ "step": 1418
+ },
+ {
+ "epoch": 1.4193992060266951,
+ "grad_norm": 0.5592547059059143,
+ "learning_rate": 0.00010832391160671729,
+ "loss": 0.9462,
+ "step": 1419
+ },
+ {
+ "epoch": 1.4203994873558188,
+ "grad_norm": 0.5079266428947449,
+ "learning_rate": 0.00010821944625582392,
+ "loss": 1.0473,
+ "step": 1420
+ },
+ {
+ "epoch": 1.4213997686849427,
+ "grad_norm": 0.5006073713302612,
+ "learning_rate": 0.00010811497187324555,
+ "loss": 0.8077,
+ "step": 1421
+ },
+ {
+ "epoch": 1.4224000500140663,
+ "grad_norm": 0.47260841727256775,
+ "learning_rate": 0.00010801048857378071,
+ "loss": 0.8069,
+ "step": 1422
+ },
+ {
+ "epoch": 1.4234003313431902,
+ "grad_norm": 0.5051037669181824,
+ "learning_rate": 0.00010790599647223763,
+ "loss": 1.0241,
+ "step": 1423
+ },
+ {
+ "epoch": 1.424400612672314,
+ "grad_norm": 0.5116690397262573,
+ "learning_rate": 0.0001078014956834342,
+ "loss": 1.0377,
+ "step": 1424
+ },
+ {
+ "epoch": 1.425400894001438,
+ "grad_norm": 0.48974907398223877,
+ "learning_rate": 0.00010769698632219794,
+ "loss": 1.0578,
+ "step": 1425
+ },
+ {
+ "epoch": 1.4264011753305617,
+ "grad_norm": 0.5071999430656433,
+ "learning_rate": 0.00010759246850336572,
+ "loss": 0.9072,
+ "step": 1426
+ },
+ {
+ "epoch": 1.4274014566596855,
+ "grad_norm": 0.6418463587760925,
+ "learning_rate": 0.0001074879423417837,
+ "loss": 1.1195,
+ "step": 1427
+ },
+ {
+ "epoch": 1.4284017379888094,
+ "grad_norm": 0.4854032099246979,
+ "learning_rate": 0.00010738340795230721,
+ "loss": 1.0776,
+ "step": 1428
+ },
+ {
+ "epoch": 1.429402019317933,
+ "grad_norm": 0.5330777764320374,
+ "learning_rate": 0.00010727886544980068,
+ "loss": 1.0851,
+ "step": 1429
+ },
+ {
+ "epoch": 1.430402300647057,
+ "grad_norm": 0.5281643271446228,
+ "learning_rate": 0.00010717431494913741,
+ "loss": 0.8663,
+ "step": 1430
+ },
+ {
+ "epoch": 1.4314025819761809,
+ "grad_norm": 0.47898662090301514,
+ "learning_rate": 0.00010706975656519946,
+ "loss": 0.9926,
+ "step": 1431
+ },
+ {
+ "epoch": 1.4324028633053048,
+ "grad_norm": 0.43927934765815735,
+ "learning_rate": 0.00010696519041287765,
+ "loss": 0.8698,
+ "step": 1432
+ },
+ {
+ "epoch": 1.4334031446344284,
+ "grad_norm": 0.5207253694534302,
+ "learning_rate": 0.0001068606166070712,
+ "loss": 0.9795,
+ "step": 1433
+ },
+ {
+ "epoch": 1.4344034259635523,
+ "grad_norm": 0.5264511704444885,
+ "learning_rate": 0.00010675603526268785,
+ "loss": 0.9593,
+ "step": 1434
+ },
+ {
+ "epoch": 1.435403707292676,
+ "grad_norm": 0.5435792803764343,
+ "learning_rate": 0.00010665144649464356,
+ "loss": 0.9436,
+ "step": 1435
+ },
+ {
+ "epoch": 1.4364039886217999,
+ "grad_norm": 0.5383104681968689,
+ "learning_rate": 0.00010654685041786249,
+ "loss": 0.9569,
+ "step": 1436
+ },
+ {
+ "epoch": 1.4374042699509237,
+ "grad_norm": 0.48762592673301697,
+ "learning_rate": 0.00010644224714727681,
+ "loss": 0.9235,
+ "step": 1437
+ },
+ {
+ "epoch": 1.4384045512800476,
+ "grad_norm": 0.4815019965171814,
+ "learning_rate": 0.0001063376367978266,
+ "loss": 0.8241,
+ "step": 1438
+ },
+ {
+ "epoch": 1.4394048326091713,
+ "grad_norm": 0.4944337010383606,
+ "learning_rate": 0.00010623301948445971,
+ "loss": 0.9169,
+ "step": 1439
+ },
+ {
+ "epoch": 1.4404051139382952,
+ "grad_norm": 0.5658552646636963,
+ "learning_rate": 0.00010612839532213164,
+ "loss": 1.044,
+ "step": 1440
+ },
+ {
+ "epoch": 1.4414053952674188,
+ "grad_norm": 0.5688045620918274,
+ "learning_rate": 0.00010602376442580544,
+ "loss": 0.9684,
+ "step": 1441
+ },
+ {
+ "epoch": 1.4424056765965427,
+ "grad_norm": 0.5434709787368774,
+ "learning_rate": 0.00010591912691045152,
+ "loss": 0.8741,
+ "step": 1442
+ },
+ {
+ "epoch": 1.4434059579256666,
+ "grad_norm": 0.583562433719635,
+ "learning_rate": 0.00010581448289104758,
+ "loss": 1.1651,
+ "step": 1443
+ },
+ {
+ "epoch": 1.4444062392547905,
+ "grad_norm": 0.566363513469696,
+ "learning_rate": 0.00010570983248257853,
+ "loss": 1.0091,
+ "step": 1444
+ },
+ {
+ "epoch": 1.4454065205839142,
+ "grad_norm": 0.527039647102356,
+ "learning_rate": 0.00010560517580003617,
+ "loss": 1.0666,
+ "step": 1445
+ },
+ {
+ "epoch": 1.446406801913038,
+ "grad_norm": 0.46389803290367126,
+ "learning_rate": 0.00010550051295841931,
+ "loss": 0.9344,
+ "step": 1446
+ },
+ {
+ "epoch": 1.447407083242162,
+ "grad_norm": 0.6291074752807617,
+ "learning_rate": 0.00010539584407273349,
+ "loss": 1.0388,
+ "step": 1447
+ },
+ {
+ "epoch": 1.4484073645712856,
+ "grad_norm": 0.5249356031417847,
+ "learning_rate": 0.00010529116925799085,
+ "loss": 0.97,
+ "step": 1448
+ },
+ {
+ "epoch": 1.4494076459004095,
+ "grad_norm": 0.4662008583545685,
+ "learning_rate": 0.00010518648862921012,
+ "loss": 0.8385,
+ "step": 1449
+ },
+ {
+ "epoch": 1.4504079272295334,
+ "grad_norm": 0.5730600953102112,
+ "learning_rate": 0.00010508180230141635,
+ "loss": 0.8747,
+ "step": 1450
+ },
+ {
+ "epoch": 1.451408208558657,
+ "grad_norm": 0.48082512617111206,
+ "learning_rate": 0.00010497711038964086,
+ "loss": 0.8624,
+ "step": 1451
+ },
+ {
+ "epoch": 1.452408489887781,
+ "grad_norm": 0.48900333046913147,
+ "learning_rate": 0.0001048724130089212,
+ "loss": 0.7826,
+ "step": 1452
+ },
+ {
+ "epoch": 1.4534087712169048,
+ "grad_norm": 0.4998112618923187,
+ "learning_rate": 0.00010476771027430086,
+ "loss": 0.8687,
+ "step": 1453
+ },
+ {
+ "epoch": 1.4544090525460285,
+ "grad_norm": 0.4872112572193146,
+ "learning_rate": 0.00010466300230082911,
+ "loss": 0.9185,
+ "step": 1454
+ },
+ {
+ "epoch": 1.4554093338751524,
+ "grad_norm": 0.5405575633049011,
+ "learning_rate": 0.00010455828920356115,
+ "loss": 0.9601,
+ "step": 1455
+ },
+ {
+ "epoch": 1.4564096152042763,
+ "grad_norm": 0.4496804475784302,
+ "learning_rate": 0.00010445357109755771,
+ "loss": 0.8606,
+ "step": 1456
+ },
+ {
+ "epoch": 1.4574098965334001,
+ "grad_norm": 0.49340635538101196,
+ "learning_rate": 0.00010434884809788508,
+ "loss": 1.1009,
+ "step": 1457
+ },
+ {
+ "epoch": 1.4584101778625238,
+ "grad_norm": 0.4692990481853485,
+ "learning_rate": 0.00010424412031961484,
+ "loss": 0.8011,
+ "step": 1458
+ },
+ {
+ "epoch": 1.4594104591916477,
+ "grad_norm": 0.5027800798416138,
+ "learning_rate": 0.00010413938787782394,
+ "loss": 0.8827,
+ "step": 1459
+ },
+ {
+ "epoch": 1.4604107405207714,
+ "grad_norm": 0.6764587163925171,
+ "learning_rate": 0.00010403465088759437,
+ "loss": 0.8513,
+ "step": 1460
+ },
+ {
+ "epoch": 1.4614110218498952,
+ "grad_norm": 0.558620035648346,
+ "learning_rate": 0.00010392990946401313,
+ "loss": 0.9881,
+ "step": 1461
+ },
+ {
+ "epoch": 1.4624113031790191,
+ "grad_norm": 0.603817343711853,
+ "learning_rate": 0.00010382516372217215,
+ "loss": 0.9869,
+ "step": 1462
+ },
+ {
+ "epoch": 1.463411584508143,
+ "grad_norm": 0.4486953020095825,
+ "learning_rate": 0.000103720413777168,
+ "loss": 0.8933,
+ "step": 1463
+ },
+ {
+ "epoch": 1.4644118658372667,
+ "grad_norm": 0.5756564736366272,
+ "learning_rate": 0.00010361565974410192,
+ "loss": 0.9974,
+ "step": 1464
+ },
+ {
+ "epoch": 1.4654121471663906,
+ "grad_norm": 0.4386444389820099,
+ "learning_rate": 0.00010351090173807969,
+ "loss": 0.8577,
+ "step": 1465
+ },
+ {
+ "epoch": 1.4664124284955142,
+ "grad_norm": 0.5308933258056641,
+ "learning_rate": 0.00010340613987421137,
+ "loss": 1.0539,
+ "step": 1466
+ },
+ {
+ "epoch": 1.4674127098246381,
+ "grad_norm": 0.6070798635482788,
+ "learning_rate": 0.00010330137426761135,
+ "loss": 0.9111,
+ "step": 1467
+ },
+ {
+ "epoch": 1.468412991153762,
+ "grad_norm": 0.5870214700698853,
+ "learning_rate": 0.00010319660503339808,
+ "loss": 0.9958,
+ "step": 1468
+ },
+ {
+ "epoch": 1.469413272482886,
+ "grad_norm": 0.5014438629150391,
+ "learning_rate": 0.00010309183228669397,
+ "loss": 0.987,
+ "step": 1469
+ },
+ {
+ "epoch": 1.4704135538120096,
+ "grad_norm": 0.47051525115966797,
+ "learning_rate": 0.00010298705614262532,
+ "loss": 1.0899,
+ "step": 1470
+ },
+ {
+ "epoch": 1.4714138351411334,
+ "grad_norm": 0.5500984787940979,
+ "learning_rate": 0.0001028822767163222,
+ "loss": 0.8882,
+ "step": 1471
+ },
+ {
+ "epoch": 1.4724141164702573,
+ "grad_norm": 0.4973205626010895,
+ "learning_rate": 0.00010277749412291824,
+ "loss": 0.9374,
+ "step": 1472
+ },
+ {
+ "epoch": 1.473414397799381,
+ "grad_norm": 0.4927331209182739,
+ "learning_rate": 0.00010267270847755048,
+ "loss": 0.9608,
+ "step": 1473
+ },
+ {
+ "epoch": 1.4744146791285049,
+ "grad_norm": 0.5539640188217163,
+ "learning_rate": 0.00010256791989535952,
+ "loss": 0.9339,
+ "step": 1474
+ },
+ {
+ "epoch": 1.4754149604576288,
+ "grad_norm": 0.48375800251960754,
+ "learning_rate": 0.00010246312849148899,
+ "loss": 0.8778,
+ "step": 1475
+ },
+ {
+ "epoch": 1.4764152417867527,
+ "grad_norm": 0.522544264793396,
+ "learning_rate": 0.00010235833438108571,
+ "loss": 0.9633,
+ "step": 1476
+ },
+ {
+ "epoch": 1.4774155231158763,
+ "grad_norm": 0.5747688412666321,
+ "learning_rate": 0.00010225353767929944,
+ "loss": 1.0206,
+ "step": 1477
+ },
+ {
+ "epoch": 1.4784158044450002,
+ "grad_norm": 0.4539598226547241,
+ "learning_rate": 0.00010214873850128282,
+ "loss": 0.7895,
+ "step": 1478
+ },
+ {
+ "epoch": 1.4794160857741239,
+ "grad_norm": 0.4290696978569031,
+ "learning_rate": 0.00010204393696219117,
+ "loss": 0.8718,
+ "step": 1479
+ },
+ {
+ "epoch": 1.4804163671032478,
+ "grad_norm": 0.43560928106307983,
+ "learning_rate": 0.00010193913317718244,
+ "loss": 0.8839,
+ "step": 1480
+ },
+ {
+ "epoch": 1.4814166484323716,
+ "grad_norm": 0.4937680661678314,
+ "learning_rate": 0.00010183432726141706,
+ "loss": 0.9615,
+ "step": 1481
+ },
+ {
+ "epoch": 1.4824169297614955,
+ "grad_norm": 0.5631589889526367,
+ "learning_rate": 0.00010172951933005775,
+ "loss": 1.0691,
+ "step": 1482
+ },
+ {
+ "epoch": 1.4834172110906192,
+ "grad_norm": 0.5049973726272583,
+ "learning_rate": 0.00010162470949826948,
+ "loss": 0.9107,
+ "step": 1483
+ },
+ {
+ "epoch": 1.484417492419743,
+ "grad_norm": 0.5362145304679871,
+ "learning_rate": 0.0001015198978812193,
+ "loss": 0.9762,
+ "step": 1484
+ },
+ {
+ "epoch": 1.4854177737488667,
+ "grad_norm": 0.4824192225933075,
+ "learning_rate": 0.00010141508459407623,
+ "loss": 0.8844,
+ "step": 1485
+ },
+ {
+ "epoch": 1.4864180550779906,
+ "grad_norm": 0.5116665959358215,
+ "learning_rate": 0.0001013102697520111,
+ "loss": 0.9461,
+ "step": 1486
+ },
+ {
+ "epoch": 1.4874183364071145,
+ "grad_norm": 0.5244630575180054,
+ "learning_rate": 0.00010120545347019647,
+ "loss": 1.0286,
+ "step": 1487
+ },
+ {
+ "epoch": 1.4884186177362384,
+ "grad_norm": 0.5252584218978882,
+ "learning_rate": 0.00010110063586380646,
+ "loss": 1.1083,
+ "step": 1488
+ },
+ {
+ "epoch": 1.489418899065362,
+ "grad_norm": 0.4909230172634125,
+ "learning_rate": 0.00010099581704801673,
+ "loss": 0.9338,
+ "step": 1489
+ },
+ {
+ "epoch": 1.490419180394486,
+ "grad_norm": 0.5618056654930115,
+ "learning_rate": 0.00010089099713800414,
+ "loss": 1.0513,
+ "step": 1490
+ },
+ {
+ "epoch": 1.4914194617236098,
+ "grad_norm": 0.48737892508506775,
+ "learning_rate": 0.00010078617624894684,
+ "loss": 0.8669,
+ "step": 1491
+ },
+ {
+ "epoch": 1.4924197430527335,
+ "grad_norm": 0.411451131105423,
+ "learning_rate": 0.000100681354496024,
+ "loss": 0.881,
+ "step": 1492
+ },
+ {
+ "epoch": 1.4934200243818574,
+ "grad_norm": 0.5821709632873535,
+ "learning_rate": 0.00010057653199441581,
+ "loss": 0.9359,
+ "step": 1493
+ },
+ {
+ "epoch": 1.4944203057109813,
+ "grad_norm": 0.4621860086917877,
+ "learning_rate": 0.00010047170885930324,
+ "loss": 0.8121,
+ "step": 1494
+ },
+ {
+ "epoch": 1.4954205870401052,
+ "grad_norm": 0.4658668339252472,
+ "learning_rate": 0.00010036688520586788,
+ "loss": 0.9806,
+ "step": 1495
+ },
+ {
+ "epoch": 1.4964208683692288,
+ "grad_norm": 0.49816030263900757,
+ "learning_rate": 0.00010026206114929209,
+ "loss": 0.9124,
+ "step": 1496
+ },
+ {
+ "epoch": 1.4974211496983527,
+ "grad_norm": 0.5228123068809509,
+ "learning_rate": 0.00010015723680475846,
+ "loss": 1.0132,
+ "step": 1497
+ },
+ {
+ "epoch": 1.4984214310274764,
+ "grad_norm": 0.4727514982223511,
+ "learning_rate": 0.00010005241228745004,
+ "loss": 0.8418,
+ "step": 1498
+ },
+ {
+ "epoch": 1.4994217123566003,
+ "grad_norm": 0.528904914855957,
+ "learning_rate": 9.994758771254997e-05,
+ "loss": 0.9702,
+ "step": 1499
+ },
+ {
+ "epoch": 1.5004219936857242,
+ "grad_norm": 0.5090524554252625,
+ "learning_rate": 9.984276319524154e-05,
+ "loss": 0.9927,
+ "step": 1500
+ },
+ {
+ "epoch": 1.501422275014848,
+ "grad_norm": 0.4553126096725464,
+ "learning_rate": 9.973793885070792e-05,
+ "loss": 0.9075,
+ "step": 1501
+ },
+ {
+ "epoch": 1.5024225563439717,
+ "grad_norm": 0.4887089133262634,
+ "learning_rate": 9.963311479413211e-05,
+ "loss": 0.9999,
+ "step": 1502
+ },
+ {
+ "epoch": 1.5034228376730956,
+ "grad_norm": 0.48520341515541077,
+ "learning_rate": 9.95282911406968e-05,
+ "loss": 1.0182,
+ "step": 1503
+ },
+ {
+ "epoch": 1.5044231190022193,
+ "grad_norm": 0.5554280877113342,
+ "learning_rate": 9.942346800558421e-05,
+ "loss": 0.9456,
+ "step": 1504
+ },
+ {
+ "epoch": 1.5054234003313431,
+ "grad_norm": 0.5199026465415955,
+ "learning_rate": 9.931864550397601e-05,
+ "loss": 1.0141,
+ "step": 1505
+ },
+ {
+ "epoch": 1.506423681660467,
+ "grad_norm": 0.5191763043403625,
+ "learning_rate": 9.921382375105318e-05,
+ "loss": 0.937,
+ "step": 1506
+ },
+ {
+ "epoch": 1.507423962989591,
+ "grad_norm": 0.5416325330734253,
+ "learning_rate": 9.910900286199587e-05,
+ "loss": 1.07,
+ "step": 1507
+ },
+ {
+ "epoch": 1.5084242443187148,
+ "grad_norm": 0.5193303227424622,
+ "learning_rate": 9.900418295198328e-05,
+ "loss": 0.9386,
+ "step": 1508
+ },
+ {
+ "epoch": 1.5094245256478385,
+ "grad_norm": 0.5433129072189331,
+ "learning_rate": 9.889936413619356e-05,
+ "loss": 0.8967,
+ "step": 1509
+ },
+ {
+ "epoch": 1.5104248069769621,
+ "grad_norm": 0.526980459690094,
+ "learning_rate": 9.879454652980358e-05,
+ "loss": 1.1135,
+ "step": 1510
+ },
+ {
+ "epoch": 1.511425088306086,
+ "grad_norm": 0.4468344449996948,
+ "learning_rate": 9.868973024798895e-05,
+ "loss": 0.9408,
+ "step": 1511
+ },
+ {
+ "epoch": 1.51242536963521,
+ "grad_norm": 0.5974569320678711,
+ "learning_rate": 9.858491540592382e-05,
+ "loss": 0.9747,
+ "step": 1512
+ },
+ {
+ "epoch": 1.5134256509643338,
+ "grad_norm": 0.5186171531677246,
+ "learning_rate": 9.848010211878074e-05,
+ "loss": 1.1012,
+ "step": 1513
+ },
+ {
+ "epoch": 1.5144259322934577,
+ "grad_norm": 0.5307335257530212,
+ "learning_rate": 9.837529050173052e-05,
+ "loss": 0.9548,
+ "step": 1514
+ },
+ {
+ "epoch": 1.5154262136225813,
+ "grad_norm": 0.469865083694458,
+ "learning_rate": 9.827048066994225e-05,
+ "loss": 0.8556,
+ "step": 1515
+ },
+ {
+ "epoch": 1.516426494951705,
+ "grad_norm": 0.4164840877056122,
+ "learning_rate": 9.816567273858296e-05,
+ "loss": 0.7429,
+ "step": 1516
+ },
+ {
+ "epoch": 1.517426776280829,
+ "grad_norm": 0.5811400413513184,
+ "learning_rate": 9.806086682281758e-05,
+ "loss": 1.066,
+ "step": 1517
+ },
+ {
+ "epoch": 1.5184270576099528,
+ "grad_norm": 0.4634648263454437,
+ "learning_rate": 9.795606303780885e-05,
+ "loss": 1.0048,
+ "step": 1518
+ },
+ {
+ "epoch": 1.5194273389390767,
+ "grad_norm": 0.45642492175102234,
+ "learning_rate": 9.785126149871722e-05,
+ "loss": 0.8776,
+ "step": 1519
+ },
+ {
+ "epoch": 1.5204276202682006,
+ "grad_norm": 0.5217366218566895,
+ "learning_rate": 9.77464623207006e-05,
+ "loss": 0.9806,
+ "step": 1520
+ },
+ {
+ "epoch": 1.5214279015973242,
+ "grad_norm": 0.4867999851703644,
+ "learning_rate": 9.764166561891432e-05,
+ "loss": 0.9539,
+ "step": 1521
+ },
+ {
+ "epoch": 1.522428182926448,
+ "grad_norm": 0.5579104423522949,
+ "learning_rate": 9.753687150851102e-05,
+ "loss": 1.0812,
+ "step": 1522
+ },
+ {
+ "epoch": 1.5234284642555718,
+ "grad_norm": 0.5152975916862488,
+ "learning_rate": 9.74320801046405e-05,
+ "loss": 0.8958,
+ "step": 1523
+ },
+ {
+ "epoch": 1.5244287455846957,
+ "grad_norm": 0.5229570269584656,
+ "learning_rate": 9.732729152244953e-05,
+ "loss": 1.1053,
+ "step": 1524
+ },
+ {
+ "epoch": 1.5254290269138195,
+ "grad_norm": 0.49501264095306396,
+ "learning_rate": 9.722250587708181e-05,
+ "loss": 0.8045,
+ "step": 1525
+ },
+ {
+ "epoch": 1.5264293082429434,
+ "grad_norm": 0.5376133918762207,
+ "learning_rate": 9.711772328367784e-05,
+ "loss": 1.0366,
+ "step": 1526
+ },
+ {
+ "epoch": 1.527429589572067,
+ "grad_norm": 0.5039237141609192,
+ "learning_rate": 9.70129438573747e-05,
+ "loss": 0.9531,
+ "step": 1527
+ },
+ {
+ "epoch": 1.528429870901191,
+ "grad_norm": 0.483420729637146,
+ "learning_rate": 9.690816771330608e-05,
+ "loss": 0.8635,
+ "step": 1528
+ },
+ {
+ "epoch": 1.5294301522303146,
+ "grad_norm": 0.5216282606124878,
+ "learning_rate": 9.680339496660192e-05,
+ "loss": 0.8885,
+ "step": 1529
+ },
+ {
+ "epoch": 1.5304304335594385,
+ "grad_norm": 0.4887123703956604,
+ "learning_rate": 9.669862573238863e-05,
+ "loss": 1.01,
+ "step": 1530
+ },
+ {
+ "epoch": 1.5314307148885624,
+ "grad_norm": 0.5213040113449097,
+ "learning_rate": 9.659386012578863e-05,
+ "loss": 0.8264,
+ "step": 1531
+ },
+ {
+ "epoch": 1.5324309962176863,
+ "grad_norm": 0.45882460474967957,
+ "learning_rate": 9.648909826192033e-05,
+ "loss": 0.9247,
+ "step": 1532
+ },
+ {
+ "epoch": 1.5334312775468102,
+ "grad_norm": 0.4360674023628235,
+ "learning_rate": 9.63843402558981e-05,
+ "loss": 0.9197,
+ "step": 1533
+ },
+ {
+ "epoch": 1.5344315588759339,
+ "grad_norm": 0.5070340633392334,
+ "learning_rate": 9.627958622283203e-05,
+ "loss": 0.9523,
+ "step": 1534
+ },
+ {
+ "epoch": 1.5354318402050575,
+ "grad_norm": 0.5255693197250366,
+ "learning_rate": 9.617483627782788e-05,
+ "loss": 1.1249,
+ "step": 1535
+ },
+ {
+ "epoch": 1.5364321215341814,
+ "grad_norm": 0.5451697707176208,
+ "learning_rate": 9.607009053598689e-05,
+ "loss": 1.0246,
+ "step": 1536
+ },
+ {
+ "epoch": 1.5374324028633053,
+ "grad_norm": 0.4846939742565155,
+ "learning_rate": 9.596534911240566e-05,
+ "loss": 0.8665,
+ "step": 1537
+ },
+ {
+ "epoch": 1.5384326841924292,
+ "grad_norm": 0.4528220295906067,
+ "learning_rate": 9.58606121221761e-05,
+ "loss": 0.9338,
+ "step": 1538
+ },
+ {
+ "epoch": 1.539432965521553,
+ "grad_norm": 0.4627808630466461,
+ "learning_rate": 9.57558796803852e-05,
+ "loss": 0.8086,
+ "step": 1539
+ },
+ {
+ "epoch": 1.5404332468506767,
+ "grad_norm": 0.47025686502456665,
+ "learning_rate": 9.565115190211497e-05,
+ "loss": 0.8745,
+ "step": 1540
+ },
+ {
+ "epoch": 1.5414335281798006,
+ "grad_norm": 0.5646499395370483,
+ "learning_rate": 9.554642890244233e-05,
+ "loss": 1.0445,
+ "step": 1541
+ },
+ {
+ "epoch": 1.5424338095089243,
+ "grad_norm": 0.48776212334632874,
+ "learning_rate": 9.54417107964389e-05,
+ "loss": 0.9189,
+ "step": 1542
+ },
+ {
+ "epoch": 1.5434340908380482,
+ "grad_norm": 0.4854126274585724,
+ "learning_rate": 9.533699769917092e-05,
+ "loss": 0.9359,
+ "step": 1543
+ },
+ {
+ "epoch": 1.544434372167172,
+ "grad_norm": 0.4896346926689148,
+ "learning_rate": 9.523228972569917e-05,
+ "loss": 0.8201,
+ "step": 1544
+ },
+ {
+ "epoch": 1.545434653496296,
+ "grad_norm": 0.5236535668373108,
+ "learning_rate": 9.512758699107879e-05,
+ "loss": 0.9501,
+ "step": 1545
+ },
+ {
+ "epoch": 1.5464349348254196,
+ "grad_norm": 0.607430636882782,
+ "learning_rate": 9.502288961035912e-05,
+ "loss": 0.8468,
+ "step": 1546
+ },
+ {
+ "epoch": 1.5474352161545435,
+ "grad_norm": 0.46944427490234375,
+ "learning_rate": 9.491819769858366e-05,
+ "loss": 0.8697,
+ "step": 1547
+ },
+ {
+ "epoch": 1.5484354974836672,
+ "grad_norm": 0.44860196113586426,
+ "learning_rate": 9.48135113707899e-05,
+ "loss": 0.9398,
+ "step": 1548
+ },
+ {
+ "epoch": 1.549435778812791,
+ "grad_norm": 0.45095279812812805,
+ "learning_rate": 9.470883074200916e-05,
+ "loss": 0.7818,
+ "step": 1549
+ },
+ {
+ "epoch": 1.550436060141915,
+ "grad_norm": 0.519603967666626,
+ "learning_rate": 9.460415592726653e-05,
+ "loss": 0.8663,
+ "step": 1550
+ },
+ {
+ "epoch": 1.5514363414710388,
+ "grad_norm": 0.4833553731441498,
+ "learning_rate": 9.449948704158071e-05,
+ "loss": 0.958,
+ "step": 1551
+ },
+ {
+ "epoch": 1.5524366228001627,
+ "grad_norm": 0.504408597946167,
+ "learning_rate": 9.439482419996384e-05,
+ "loss": 0.8795,
+ "step": 1552
+ },
+ {
+ "epoch": 1.5534369041292864,
+ "grad_norm": 0.45152923464775085,
+ "learning_rate": 9.42901675174215e-05,
+ "loss": 0.8427,
+ "step": 1553
+ },
+ {
+ "epoch": 1.55443718545841,
+ "grad_norm": 0.48051750659942627,
+ "learning_rate": 9.418551710895243e-05,
+ "loss": 0.8997,
+ "step": 1554
+ },
+ {
+ "epoch": 1.555437466787534,
+ "grad_norm": 0.41671374440193176,
+ "learning_rate": 9.408087308954853e-05,
+ "loss": 0.7823,
+ "step": 1555
+ },
+ {
+ "epoch": 1.5564377481166578,
+ "grad_norm": 0.4859127402305603,
+ "learning_rate": 9.397623557419461e-05,
+ "loss": 0.8865,
+ "step": 1556
+ },
+ {
+ "epoch": 1.5574380294457817,
+ "grad_norm": 0.492712140083313,
+ "learning_rate": 9.38716046778684e-05,
+ "loss": 0.8464,
+ "step": 1557
+ },
+ {
+ "epoch": 1.5584383107749056,
+ "grad_norm": 0.4976697564125061,
+ "learning_rate": 9.37669805155403e-05,
+ "loss": 0.948,
+ "step": 1558
+ },
+ {
+ "epoch": 1.5594385921040292,
+ "grad_norm": 0.5431742668151855,
+ "learning_rate": 9.366236320217339e-05,
+ "loss": 1.1718,
+ "step": 1559
+ },
+ {
+ "epoch": 1.5604388734331531,
+ "grad_norm": 0.49732932448387146,
+ "learning_rate": 9.355775285272318e-05,
+ "loss": 0.939,
+ "step": 1560
+ },
+ {
+ "epoch": 1.5614391547622768,
+ "grad_norm": 0.4857761859893799,
+ "learning_rate": 9.34531495821375e-05,
+ "loss": 0.9269,
+ "step": 1561
+ },
+ {
+ "epoch": 1.5624394360914007,
+ "grad_norm": 0.47211897373199463,
+ "learning_rate": 9.334855350535645e-05,
+ "loss": 1.0069,
+ "step": 1562
+ },
+ {
+ "epoch": 1.5634397174205246,
+ "grad_norm": 0.4433748126029968,
+ "learning_rate": 9.324396473731217e-05,
+ "loss": 0.866,
+ "step": 1563
+ },
+ {
+ "epoch": 1.5644399987496485,
+ "grad_norm": 0.5030574798583984,
+ "learning_rate": 9.313938339292883e-05,
+ "loss": 0.7763,
+ "step": 1564
+ },
+ {
+ "epoch": 1.5654402800787721,
+ "grad_norm": 0.46466779708862305,
+ "learning_rate": 9.303480958712239e-05,
+ "loss": 0.9033,
+ "step": 1565
+ },
+ {
+ "epoch": 1.566440561407896,
+ "grad_norm": 0.39663952589035034,
+ "learning_rate": 9.293024343480055e-05,
+ "loss": 0.7205,
+ "step": 1566
+ },
+ {
+ "epoch": 1.5674408427370197,
+ "grad_norm": 0.5455542206764221,
+ "learning_rate": 9.282568505086261e-05,
+ "loss": 0.8864,
+ "step": 1567
+ },
+ {
+ "epoch": 1.5684411240661436,
+ "grad_norm": 0.5139548778533936,
+ "learning_rate": 9.272113455019935e-05,
+ "loss": 0.9822,
+ "step": 1568
+ },
+ {
+ "epoch": 1.5694414053952674,
+ "grad_norm": 0.46824902296066284,
+ "learning_rate": 9.261659204769284e-05,
+ "loss": 0.8348,
+ "step": 1569
+ },
+ {
+ "epoch": 1.5704416867243913,
+ "grad_norm": 0.5223984122276306,
+ "learning_rate": 9.251205765821636e-05,
+ "loss": 0.9696,
+ "step": 1570
+ },
+ {
+ "epoch": 1.5714419680535152,
+ "grad_norm": 0.6279047727584839,
+ "learning_rate": 9.240753149663433e-05,
+ "loss": 1.009,
+ "step": 1571
+ },
+ {
+ "epoch": 1.5724422493826389,
+ "grad_norm": 0.49068430066108704,
+ "learning_rate": 9.230301367780208e-05,
+ "loss": 0.9984,
+ "step": 1572
+ },
+ {
+ "epoch": 1.5734425307117625,
+ "grad_norm": 0.4828907251358032,
+ "learning_rate": 9.219850431656579e-05,
+ "loss": 0.8535,
+ "step": 1573
+ },
+ {
+ "epoch": 1.5744428120408864,
+ "grad_norm": 0.4925834834575653,
+ "learning_rate": 9.209400352776237e-05,
+ "loss": 0.8849,
+ "step": 1574
+ },
+ {
+ "epoch": 1.5754430933700103,
+ "grad_norm": 0.5048914551734924,
+ "learning_rate": 9.198951142621929e-05,
+ "loss": 0.8767,
+ "step": 1575
+ },
+ {
+ "epoch": 1.5764433746991342,
+ "grad_norm": 0.44887635111808777,
+ "learning_rate": 9.188502812675446e-05,
+ "loss": 0.8687,
+ "step": 1576
+ },
+ {
+ "epoch": 1.577443656028258,
+ "grad_norm": 0.4909934401512146,
+ "learning_rate": 9.178055374417612e-05,
+ "loss": 0.8362,
+ "step": 1577
+ },
+ {
+ "epoch": 1.5784439373573818,
+ "grad_norm": 0.45031628012657166,
+ "learning_rate": 9.167608839328272e-05,
+ "loss": 0.902,
+ "step": 1578
+ },
+ {
+ "epoch": 1.5794442186865054,
+ "grad_norm": 0.5682864189147949,
+ "learning_rate": 9.15716321888628e-05,
+ "loss": 1.0558,
+ "step": 1579
+ },
+ {
+ "epoch": 1.5804445000156293,
+ "grad_norm": 0.4406115412712097,
+ "learning_rate": 9.146718524569487e-05,
+ "loss": 0.8283,
+ "step": 1580
+ },
+ {
+ "epoch": 1.5814447813447532,
+ "grad_norm": 0.4749000072479248,
+ "learning_rate": 9.136274767854716e-05,
+ "loss": 0.9342,
+ "step": 1581
+ },
+ {
+ "epoch": 1.582445062673877,
+ "grad_norm": 0.4785940945148468,
+ "learning_rate": 9.125831960217774e-05,
+ "loss": 0.9208,
+ "step": 1582
+ },
+ {
+ "epoch": 1.583445344003001,
+ "grad_norm": 0.572299599647522,
+ "learning_rate": 9.115390113133414e-05,
+ "loss": 0.8469,
+ "step": 1583
+ },
+ {
+ "epoch": 1.5844456253321246,
+ "grad_norm": 0.4829537570476532,
+ "learning_rate": 9.104949238075336e-05,
+ "loss": 0.9471,
+ "step": 1584
+ },
+ {
+ "epoch": 1.5854459066612485,
+ "grad_norm": 0.5315890908241272,
+ "learning_rate": 9.094509346516178e-05,
+ "loss": 0.9663,
+ "step": 1585
+ },
+ {
+ "epoch": 1.5864461879903722,
+ "grad_norm": 0.4654553532600403,
+ "learning_rate": 9.084070449927488e-05,
+ "loss": 0.7776,
+ "step": 1586
+ },
+ {
+ "epoch": 1.587446469319496,
+ "grad_norm": 0.5083040595054626,
+ "learning_rate": 9.07363255977973e-05,
+ "loss": 0.8438,
+ "step": 1587
+ },
+ {
+ "epoch": 1.58844675064862,
+ "grad_norm": 0.502129077911377,
+ "learning_rate": 9.063195687542249e-05,
+ "loss": 0.8481,
+ "step": 1588
+ },
+ {
+ "epoch": 1.5894470319777438,
+ "grad_norm": 0.517439067363739,
+ "learning_rate": 9.052759844683295e-05,
+ "loss": 0.9054,
+ "step": 1589
+ },
+ {
+ "epoch": 1.5904473133068675,
+ "grad_norm": 0.4777907431125641,
+ "learning_rate": 9.042325042669961e-05,
+ "loss": 0.9888,
+ "step": 1590
+ },
+ {
+ "epoch": 1.5914475946359914,
+ "grad_norm": 0.41228219866752625,
+ "learning_rate": 9.03189129296821e-05,
+ "loss": 0.5767,
+ "step": 1591
+ },
+ {
+ "epoch": 1.592447875965115,
+ "grad_norm": 0.45188775658607483,
+ "learning_rate": 9.021458607042845e-05,
+ "loss": 0.875,
+ "step": 1592
+ },
+ {
+ "epoch": 1.593448157294239,
+ "grad_norm": 0.46999362111091614,
+ "learning_rate": 9.011026996357503e-05,
+ "loss": 0.8739,
+ "step": 1593
+ },
+ {
+ "epoch": 1.5944484386233628,
+ "grad_norm": 0.5621476173400879,
+ "learning_rate": 9.000596472374637e-05,
+ "loss": 0.8978,
+ "step": 1594
+ },
+ {
+ "epoch": 1.5954487199524867,
+ "grad_norm": 0.4524415135383606,
+ "learning_rate": 8.990167046555504e-05,
+ "loss": 0.7987,
+ "step": 1595
+ },
+ {
+ "epoch": 1.5964490012816106,
+ "grad_norm": 0.42351627349853516,
+ "learning_rate": 8.97973873036016e-05,
+ "loss": 0.8705,
+ "step": 1596
+ },
+ {
+ "epoch": 1.5974492826107343,
+ "grad_norm": 0.45115014910697937,
+ "learning_rate": 8.969311535247438e-05,
+ "loss": 0.9235,
+ "step": 1597
+ },
+ {
+ "epoch": 1.598449563939858,
+ "grad_norm": 0.5297085642814636,
+ "learning_rate": 8.958885472674939e-05,
+ "loss": 0.9363,
+ "step": 1598
+ },
+ {
+ "epoch": 1.5994498452689818,
+ "grad_norm": 0.5296758413314819,
+ "learning_rate": 8.948460554099018e-05,
+ "loss": 0.9461,
+ "step": 1599
+ },
+ {
+ "epoch": 1.6004501265981057,
+ "grad_norm": 0.4951537251472473,
+ "learning_rate": 8.93803679097478e-05,
+ "loss": 0.9494,
+ "step": 1600
+ },
+ {
+ "epoch": 1.6014504079272296,
+ "grad_norm": 0.5380229949951172,
+ "learning_rate": 8.927614194756052e-05,
+ "loss": 0.8813,
+ "step": 1601
+ },
+ {
+ "epoch": 1.6024506892563535,
+ "grad_norm": 0.487196683883667,
+ "learning_rate": 8.917192776895382e-05,
+ "loss": 0.8183,
+ "step": 1602
+ },
+ {
+ "epoch": 1.6034509705854771,
+ "grad_norm": 0.450591504573822,
+ "learning_rate": 8.906772548844026e-05,
+ "loss": 0.9506,
+ "step": 1603
+ },
+ {
+ "epoch": 1.604451251914601,
+ "grad_norm": 0.5414707064628601,
+ "learning_rate": 8.896353522051928e-05,
+ "loss": 1.2171,
+ "step": 1604
+ },
+ {
+ "epoch": 1.6054515332437247,
+ "grad_norm": 0.5198320746421814,
+ "learning_rate": 8.885935707967716e-05,
+ "loss": 0.8762,
+ "step": 1605
+ },
+ {
+ "epoch": 1.6064518145728486,
+ "grad_norm": 0.4546220302581787,
+ "learning_rate": 8.875519118038684e-05,
+ "loss": 0.9634,
+ "step": 1606
+ },
+ {
+ "epoch": 1.6074520959019725,
+ "grad_norm": 0.5151107907295227,
+ "learning_rate": 8.865103763710777e-05,
+ "loss": 1.1038,
+ "step": 1607
+ },
+ {
+ "epoch": 1.6084523772310964,
+ "grad_norm": 0.46089720726013184,
+ "learning_rate": 8.854689656428591e-05,
+ "loss": 0.8706,
+ "step": 1608
+ },
+ {
+ "epoch": 1.60945265856022,
+ "grad_norm": 0.4554317593574524,
+ "learning_rate": 8.844276807635343e-05,
+ "loss": 0.7553,
+ "step": 1609
+ },
+ {
+ "epoch": 1.610452939889344,
+ "grad_norm": 0.5166018009185791,
+ "learning_rate": 8.833865228772871e-05,
+ "loss": 0.8954,
+ "step": 1610
+ },
+ {
+ "epoch": 1.6114532212184676,
+ "grad_norm": 0.45595693588256836,
+ "learning_rate": 8.823454931281616e-05,
+ "loss": 0.9015,
+ "step": 1611
+ },
+ {
+ "epoch": 1.6124535025475915,
+ "grad_norm": 0.4563496708869934,
+ "learning_rate": 8.813045926600615e-05,
+ "loss": 0.9071,
+ "step": 1612
+ },
+ {
+ "epoch": 1.6134537838767153,
+ "grad_norm": 0.44123467803001404,
+ "learning_rate": 8.802638226167479e-05,
+ "loss": 0.8316,
+ "step": 1613
+ },
+ {
+ "epoch": 1.6144540652058392,
+ "grad_norm": 0.5304034352302551,
+ "learning_rate": 8.792231841418391e-05,
+ "loss": 0.9965,
+ "step": 1614
+ },
+ {
+ "epoch": 1.6154543465349631,
+ "grad_norm": 0.5578649044036865,
+ "learning_rate": 8.781826783788084e-05,
+ "loss": 0.9171,
+ "step": 1615
+ },
+ {
+ "epoch": 1.6164546278640868,
+ "grad_norm": 0.5331206917762756,
+ "learning_rate": 8.771423064709837e-05,
+ "loss": 0.8648,
+ "step": 1616
+ },
+ {
+ "epoch": 1.6174549091932104,
+ "grad_norm": 0.5196745991706848,
+ "learning_rate": 8.76102069561545e-05,
+ "loss": 0.9136,
+ "step": 1617
+ },
+ {
+ "epoch": 1.6184551905223343,
+ "grad_norm": 0.5278195142745972,
+ "learning_rate": 8.750619687935251e-05,
+ "loss": 0.9105,
+ "step": 1618
+ },
+ {
+ "epoch": 1.6194554718514582,
+ "grad_norm": 0.4967080056667328,
+ "learning_rate": 8.740220053098067e-05,
+ "loss": 0.8975,
+ "step": 1619
+ },
+ {
+ "epoch": 1.620455753180582,
+ "grad_norm": 0.5626882910728455,
+ "learning_rate": 8.729821802531212e-05,
+ "loss": 1.0178,
+ "step": 1620
+ },
+ {
+ "epoch": 1.621456034509706,
+ "grad_norm": 0.4372572898864746,
+ "learning_rate": 8.719424947660487e-05,
+ "loss": 0.8344,
+ "step": 1621
+ },
+ {
+ "epoch": 1.6224563158388297,
+ "grad_norm": 0.5572327971458435,
+ "learning_rate": 8.70902949991015e-05,
+ "loss": 0.9831,
+ "step": 1622
+ },
+ {
+ "epoch": 1.6234565971679535,
+ "grad_norm": 0.43764790892601013,
+ "learning_rate": 8.698635470702923e-05,
+ "loss": 0.8901,
+ "step": 1623
+ },
+ {
+ "epoch": 1.6244568784970772,
+ "grad_norm": 0.5335058569908142,
+ "learning_rate": 8.688242871459963e-05,
+ "loss": 0.8063,
+ "step": 1624
+ },
+ {
+ "epoch": 1.625457159826201,
+ "grad_norm": 0.5070383548736572,
+ "learning_rate": 8.677851713600855e-05,
+ "loss": 1.1381,
+ "step": 1625
+ },
+ {
+ "epoch": 1.626457441155325,
+ "grad_norm": 0.5117019414901733,
+ "learning_rate": 8.667462008543603e-05,
+ "loss": 1.1598,
+ "step": 1626
+ },
+ {
+ "epoch": 1.6274577224844489,
+ "grad_norm": 0.4911440908908844,
+ "learning_rate": 8.657073767704615e-05,
+ "loss": 0.9673,
+ "step": 1627
+ },
+ {
+ "epoch": 1.6284580038135725,
+ "grad_norm": 0.4799586832523346,
+ "learning_rate": 8.646687002498692e-05,
+ "loss": 0.8415,
+ "step": 1628
+ },
+ {
+ "epoch": 1.6294582851426964,
+ "grad_norm": 0.5615330934524536,
+ "learning_rate": 8.636301724339004e-05,
+ "loss": 0.9751,
+ "step": 1629
+ },
+ {
+ "epoch": 1.63045856647182,
+ "grad_norm": 0.45118963718414307,
+ "learning_rate": 8.625917944637096e-05,
+ "loss": 0.9169,
+ "step": 1630
+ },
+ {
+ "epoch": 1.631458847800944,
+ "grad_norm": 0.49533525109291077,
+ "learning_rate": 8.615535674802865e-05,
+ "loss": 0.9739,
+ "step": 1631
+ },
+ {
+ "epoch": 1.6324591291300679,
+ "grad_norm": 0.5451453328132629,
+ "learning_rate": 8.605154926244543e-05,
+ "loss": 0.777,
+ "step": 1632
+ },
+ {
+ "epoch": 1.6334594104591917,
+ "grad_norm": 0.6013240814208984,
+ "learning_rate": 8.594775710368704e-05,
+ "loss": 0.9289,
+ "step": 1633
+ },
+ {
+ "epoch": 1.6344596917883156,
+ "grad_norm": 0.5311821699142456,
+ "learning_rate": 8.584398038580226e-05,
+ "loss": 0.9737,
+ "step": 1634
+ },
+ {
+ "epoch": 1.6354599731174393,
+ "grad_norm": 0.4836428165435791,
+ "learning_rate": 8.574021922282292e-05,
+ "loss": 0.9495,
+ "step": 1635
+ },
+ {
+ "epoch": 1.636460254446563,
+ "grad_norm": 0.5316966772079468,
+ "learning_rate": 8.563647372876378e-05,
+ "loss": 0.8871,
+ "step": 1636
+ },
+ {
+ "epoch": 1.6374605357756868,
+ "grad_norm": 0.4969998896121979,
+ "learning_rate": 8.553274401762237e-05,
+ "loss": 0.8881,
+ "step": 1637
+ },
+ {
+ "epoch": 1.6384608171048107,
+ "grad_norm": 0.48786112666130066,
+ "learning_rate": 8.542903020337887e-05,
+ "loss": 0.8859,
+ "step": 1638
+ },
+ {
+ "epoch": 1.6394610984339346,
+ "grad_norm": 0.4753643572330475,
+ "learning_rate": 8.532533239999602e-05,
+ "loss": 0.759,
+ "step": 1639
+ },
+ {
+ "epoch": 1.6404613797630585,
+ "grad_norm": 0.4672154486179352,
+ "learning_rate": 8.522165072141897e-05,
+ "loss": 0.8429,
+ "step": 1640
+ },
+ {
+ "epoch": 1.6414616610921822,
+ "grad_norm": 0.47218796610832214,
+ "learning_rate": 8.511798528157512e-05,
+ "loss": 0.7702,
+ "step": 1641
+ },
+ {
+ "epoch": 1.6424619424213058,
+ "grad_norm": 0.4409984052181244,
+ "learning_rate": 8.501433619437403e-05,
+ "loss": 0.7803,
+ "step": 1642
+ },
+ {
+ "epoch": 1.6434622237504297,
+ "grad_norm": 0.539503812789917,
+ "learning_rate": 8.49107035737073e-05,
+ "loss": 0.9739,
+ "step": 1643
+ },
+ {
+ "epoch": 1.6444625050795536,
+ "grad_norm": 0.5032373666763306,
+ "learning_rate": 8.480708753344846e-05,
+ "loss": 1.0876,
+ "step": 1644
+ },
+ {
+ "epoch": 1.6454627864086775,
+ "grad_norm": 0.4480466842651367,
+ "learning_rate": 8.470348818745278e-05,
+ "loss": 0.9183,
+ "step": 1645
+ },
+ {
+ "epoch": 1.6464630677378014,
+ "grad_norm": 0.49911466240882874,
+ "learning_rate": 8.459990564955721e-05,
+ "loss": 0.8048,
+ "step": 1646
+ },
+ {
+ "epoch": 1.647463349066925,
+ "grad_norm": 0.48236754536628723,
+ "learning_rate": 8.449634003358022e-05,
+ "loss": 0.9785,
+ "step": 1647
+ },
+ {
+ "epoch": 1.648463630396049,
+ "grad_norm": 0.5161852240562439,
+ "learning_rate": 8.43927914533217e-05,
+ "loss": 0.9626,
+ "step": 1648
+ },
+ {
+ "epoch": 1.6494639117251726,
+ "grad_norm": 0.5653015971183777,
+ "learning_rate": 8.428926002256283e-05,
+ "loss": 1.0785,
+ "step": 1649
+ },
+ {
+ "epoch": 1.6504641930542965,
+ "grad_norm": 0.5340739488601685,
+ "learning_rate": 8.418574585506591e-05,
+ "loss": 1.0613,
+ "step": 1650
+ },
+ {
+ "epoch": 1.6514644743834204,
+ "grad_norm": 0.4651111960411072,
+ "learning_rate": 8.408224906457429e-05,
+ "loss": 0.8313,
+ "step": 1651
+ },
+ {
+ "epoch": 1.6524647557125443,
+ "grad_norm": 0.5264735221862793,
+ "learning_rate": 8.397876976481224e-05,
+ "loss": 0.8187,
+ "step": 1652
+ },
+ {
+ "epoch": 1.653465037041668,
+ "grad_norm": 0.4576081335544586,
+ "learning_rate": 8.387530806948476e-05,
+ "loss": 0.8758,
+ "step": 1653
+ },
+ {
+ "epoch": 1.6544653183707918,
+ "grad_norm": 0.4851805567741394,
+ "learning_rate": 8.37718640922776e-05,
+ "loss": 0.877,
+ "step": 1654
+ },
+ {
+ "epoch": 1.6554655996999155,
+ "grad_norm": 0.48545941710472107,
+ "learning_rate": 8.366843794685695e-05,
+ "loss": 0.8988,
+ "step": 1655
+ },
+ {
+ "epoch": 1.6564658810290394,
+ "grad_norm": 0.5381633639335632,
+ "learning_rate": 8.356502974686941e-05,
+ "loss": 0.8958,
+ "step": 1656
+ },
+ {
+ "epoch": 1.6574661623581632,
+ "grad_norm": 0.5239037275314331,
+ "learning_rate": 8.346163960594193e-05,
+ "loss": 0.9698,
+ "step": 1657
+ },
+ {
+ "epoch": 1.6584664436872871,
+ "grad_norm": 0.5378285050392151,
+ "learning_rate": 8.335826763768156e-05,
+ "loss": 0.8765,
+ "step": 1658
+ },
+ {
+ "epoch": 1.659466725016411,
+ "grad_norm": 0.45296210050582886,
+ "learning_rate": 8.325491395567541e-05,
+ "loss": 0.8048,
+ "step": 1659
+ },
+ {
+ "epoch": 1.6604670063455347,
+ "grad_norm": 0.4575178325176239,
+ "learning_rate": 8.315157867349046e-05,
+ "loss": 0.8388,
+ "step": 1660
+ },
+ {
+ "epoch": 1.6614672876746583,
+ "grad_norm": 0.4762253165245056,
+ "learning_rate": 8.30482619046735e-05,
+ "loss": 0.9123,
+ "step": 1661
+ },
+ {
+ "epoch": 1.6624675690037822,
+ "grad_norm": 0.46717318892478943,
+ "learning_rate": 8.294496376275104e-05,
+ "loss": 0.9213,
+ "step": 1662
+ },
+ {
+ "epoch": 1.6634678503329061,
+ "grad_norm": 0.4792725741863251,
+ "learning_rate": 8.284168436122898e-05,
+ "loss": 0.793,
+ "step": 1663
+ },
+ {
+ "epoch": 1.66446813166203,
+ "grad_norm": 0.4854644238948822,
+ "learning_rate": 8.273842381359273e-05,
+ "loss": 0.9657,
+ "step": 1664
+ },
+ {
+ "epoch": 1.665468412991154,
+ "grad_norm": 0.44722744822502136,
+ "learning_rate": 8.263518223330697e-05,
+ "loss": 0.8159,
+ "step": 1665
+ },
+ {
+ "epoch": 1.6664686943202776,
+ "grad_norm": 0.5070934891700745,
+ "learning_rate": 8.253195973381552e-05,
+ "loss": 0.8971,
+ "step": 1666
+ },
+ {
+ "epoch": 1.6674689756494014,
+ "grad_norm": 0.4743734300136566,
+ "learning_rate": 8.242875642854121e-05,
+ "loss": 0.8042,
+ "step": 1667
+ },
+ {
+ "epoch": 1.668469256978525,
+ "grad_norm": 0.5857224464416504,
+ "learning_rate": 8.232557243088585e-05,
+ "loss": 1.0666,
+ "step": 1668
+ },
+ {
+ "epoch": 1.669469538307649,
+ "grad_norm": 0.5257895588874817,
+ "learning_rate": 8.222240785422996e-05,
+ "loss": 0.9619,
+ "step": 1669
+ },
+ {
+ "epoch": 1.6704698196367729,
+ "grad_norm": 0.5153073668479919,
+ "learning_rate": 8.211926281193277e-05,
+ "loss": 0.9189,
+ "step": 1670
+ },
+ {
+ "epoch": 1.6714701009658968,
+ "grad_norm": 0.49723324179649353,
+ "learning_rate": 8.201613741733203e-05,
+ "loss": 1.037,
+ "step": 1671
+ },
+ {
+ "epoch": 1.6724703822950204,
+ "grad_norm": 0.5014336705207825,
+ "learning_rate": 8.191303178374389e-05,
+ "loss": 0.8598,
+ "step": 1672
+ },
+ {
+ "epoch": 1.6734706636241443,
+ "grad_norm": 0.5031597018241882,
+ "learning_rate": 8.180994602446279e-05,
+ "loss": 0.9622,
+ "step": 1673
+ },
+ {
+ "epoch": 1.674470944953268,
+ "grad_norm": 0.4872223436832428,
+ "learning_rate": 8.170688025276134e-05,
+ "loss": 0.7971,
+ "step": 1674
+ },
+ {
+ "epoch": 1.6754712262823919,
+ "grad_norm": 0.5090667605400085,
+ "learning_rate": 8.160383458189022e-05,
+ "loss": 0.9825,
+ "step": 1675
+ },
+ {
+ "epoch": 1.6764715076115158,
+ "grad_norm": 0.49642691016197205,
+ "learning_rate": 8.15008091250779e-05,
+ "loss": 0.9541,
+ "step": 1676
+ },
+ {
+ "epoch": 1.6774717889406396,
+ "grad_norm": 0.7710174322128296,
+ "learning_rate": 8.13978039955308e-05,
+ "loss": 0.9036,
+ "step": 1677
+ },
+ {
+ "epoch": 1.6784720702697635,
+ "grad_norm": 0.551180362701416,
+ "learning_rate": 8.12948193064329e-05,
+ "loss": 0.931,
+ "step": 1678
+ },
+ {
+ "epoch": 1.6794723515988872,
+ "grad_norm": 0.540558934211731,
+ "learning_rate": 8.119185517094578e-05,
+ "loss": 0.8364,
+ "step": 1679
+ },
+ {
+ "epoch": 1.6804726329280109,
+ "grad_norm": 0.47380101680755615,
+ "learning_rate": 8.108891170220836e-05,
+ "loss": 0.8494,
+ "step": 1680
+ },
+ {
+ "epoch": 1.6814729142571347,
+ "grad_norm": 0.4427139461040497,
+ "learning_rate": 8.098598901333692e-05,
+ "loss": 0.8441,
+ "step": 1681
+ },
+ {
+ "epoch": 1.6824731955862586,
+ "grad_norm": 0.5092798471450806,
+ "learning_rate": 8.088308721742491e-05,
+ "loss": 0.9069,
+ "step": 1682
+ },
+ {
+ "epoch": 1.6834734769153825,
+ "grad_norm": 0.4453091621398926,
+ "learning_rate": 8.078020642754274e-05,
+ "loss": 0.8539,
+ "step": 1683
+ },
+ {
+ "epoch": 1.6844737582445064,
+ "grad_norm": 0.5102719068527222,
+ "learning_rate": 8.06773467567378e-05,
+ "loss": 0.808,
+ "step": 1684
+ },
+ {
+ "epoch": 1.68547403957363,
+ "grad_norm": 0.44998160004615784,
+ "learning_rate": 8.057450831803428e-05,
+ "loss": 0.9399,
+ "step": 1685
+ },
+ {
+ "epoch": 1.686474320902754,
+ "grad_norm": 0.47718214988708496,
+ "learning_rate": 8.047169122443302e-05,
+ "loss": 0.8851,
+ "step": 1686
+ },
+ {
+ "epoch": 1.6874746022318776,
+ "grad_norm": 0.5858275890350342,
+ "learning_rate": 8.036889558891142e-05,
+ "loss": 1.0813,
+ "step": 1687
+ },
+ {
+ "epoch": 1.6884748835610015,
+ "grad_norm": 0.6066718101501465,
+ "learning_rate": 8.026612152442329e-05,
+ "loss": 0.985,
+ "step": 1688
+ },
+ {
+ "epoch": 1.6894751648901254,
+ "grad_norm": 0.529468834400177,
+ "learning_rate": 8.016336914389874e-05,
+ "loss": 1.0599,
+ "step": 1689
+ },
+ {
+ "epoch": 1.6904754462192493,
+ "grad_norm": 0.5604698061943054,
+ "learning_rate": 8.006063856024405e-05,
+ "loss": 0.8511,
+ "step": 1690
+ },
+ {
+ "epoch": 1.691475727548373,
+ "grad_norm": 0.5078622102737427,
+ "learning_rate": 7.995792988634152e-05,
+ "loss": 0.8286,
+ "step": 1691
+ },
+ {
+ "epoch": 1.6924760088774968,
+ "grad_norm": 0.5138706564903259,
+ "learning_rate": 7.985524323504948e-05,
+ "loss": 0.9054,
+ "step": 1692
+ },
+ {
+ "epoch": 1.6934762902066205,
+ "grad_norm": 0.42073604464530945,
+ "learning_rate": 7.975257871920195e-05,
+ "loss": 0.8403,
+ "step": 1693
+ },
+ {
+ "epoch": 1.6944765715357444,
+ "grad_norm": 0.5249999761581421,
+ "learning_rate": 7.964993645160866e-05,
+ "loss": 0.8382,
+ "step": 1694
+ },
+ {
+ "epoch": 1.6954768528648683,
+ "grad_norm": 0.4233437478542328,
+ "learning_rate": 7.954731654505491e-05,
+ "loss": 0.7757,
+ "step": 1695
+ },
+ {
+ "epoch": 1.6964771341939922,
+ "grad_norm": 0.5192474722862244,
+ "learning_rate": 7.944471911230142e-05,
+ "loss": 0.9689,
+ "step": 1696
+ },
+ {
+ "epoch": 1.697477415523116,
+ "grad_norm": 0.5599137544631958,
+ "learning_rate": 7.93421442660842e-05,
+ "loss": 1.1277,
+ "step": 1697
+ },
+ {
+ "epoch": 1.6984776968522397,
+ "grad_norm": 0.4425784647464752,
+ "learning_rate": 7.923959211911449e-05,
+ "loss": 0.8822,
+ "step": 1698
+ },
+ {
+ "epoch": 1.6994779781813634,
+ "grad_norm": 0.48276057839393616,
+ "learning_rate": 7.91370627840785e-05,
+ "loss": 1.0073,
+ "step": 1699
+ },
+ {
+ "epoch": 1.7004782595104873,
+ "grad_norm": 0.5134496688842773,
+ "learning_rate": 7.903455637363746e-05,
+ "loss": 0.8437,
+ "step": 1700
+ },
+ {
+ "epoch": 1.7014785408396111,
+ "grad_norm": 0.49254342913627625,
+ "learning_rate": 7.89320730004274e-05,
+ "loss": 0.9512,
+ "step": 1701
+ },
+ {
+ "epoch": 1.702478822168735,
+ "grad_norm": 0.4442595839500427,
+ "learning_rate": 7.882961277705895e-05,
+ "loss": 0.8391,
+ "step": 1702
+ },
+ {
+ "epoch": 1.703479103497859,
+ "grad_norm": 0.5177878141403198,
+ "learning_rate": 7.872717581611741e-05,
+ "loss": 0.9012,
+ "step": 1703
+ },
+ {
+ "epoch": 1.7044793848269826,
+ "grad_norm": 0.4612918496131897,
+ "learning_rate": 7.862476223016246e-05,
+ "loss": 0.86,
+ "step": 1704
+ },
+ {
+ "epoch": 1.7054796661561062,
+ "grad_norm": 0.47172513604164124,
+ "learning_rate": 7.852237213172812e-05,
+ "loss": 0.8821,
+ "step": 1705
+ },
+ {
+ "epoch": 1.7064799474852301,
+ "grad_norm": 0.5113676190376282,
+ "learning_rate": 7.842000563332254e-05,
+ "loss": 0.8243,
+ "step": 1706
+ },
+ {
+ "epoch": 1.707480228814354,
+ "grad_norm": 0.5000366568565369,
+ "learning_rate": 7.831766284742807e-05,
+ "loss": 0.9887,
+ "step": 1707
+ },
+ {
+ "epoch": 1.708480510143478,
+ "grad_norm": 0.5838572978973389,
+ "learning_rate": 7.82153438865009e-05,
+ "loss": 0.9401,
+ "step": 1708
+ },
+ {
+ "epoch": 1.7094807914726018,
+ "grad_norm": 0.5229962468147278,
+ "learning_rate": 7.811304886297104e-05,
+ "loss": 1.0353,
+ "step": 1709
+ },
+ {
+ "epoch": 1.7104810728017255,
+ "grad_norm": 0.45854273438453674,
+ "learning_rate": 7.801077788924224e-05,
+ "loss": 0.8868,
+ "step": 1710
+ },
+ {
+ "epoch": 1.7114813541308493,
+ "grad_norm": 0.5133983492851257,
+ "learning_rate": 7.790853107769179e-05,
+ "loss": 0.9689,
+ "step": 1711
+ },
+ {
+ "epoch": 1.712481635459973,
+ "grad_norm": 0.5269356369972229,
+ "learning_rate": 7.780630854067045e-05,
+ "loss": 0.8751,
+ "step": 1712
+ },
+ {
+ "epoch": 1.713481916789097,
+ "grad_norm": 0.523595929145813,
+ "learning_rate": 7.77041103905023e-05,
+ "loss": 0.9806,
+ "step": 1713
+ },
+ {
+ "epoch": 1.7144821981182208,
+ "grad_norm": 0.6217412352561951,
+ "learning_rate": 7.760193673948461e-05,
+ "loss": 0.8298,
+ "step": 1714
+ },
+ {
+ "epoch": 1.7154824794473447,
+ "grad_norm": 0.47979483008384705,
+ "learning_rate": 7.749978769988778e-05,
+ "loss": 0.8578,
+ "step": 1715
+ },
+ {
+ "epoch": 1.7164827607764683,
+ "grad_norm": 0.4971829652786255,
+ "learning_rate": 7.739766338395511e-05,
+ "loss": 0.9794,
+ "step": 1716
+ },
+ {
+ "epoch": 1.7174830421055922,
+ "grad_norm": 0.5164886116981506,
+ "learning_rate": 7.729556390390275e-05,
+ "loss": 0.9267,
+ "step": 1717
+ },
+ {
+ "epoch": 1.7184833234347159,
+ "grad_norm": 0.5067420601844788,
+ "learning_rate": 7.719348937191957e-05,
+ "loss": 0.951,
+ "step": 1718
+ },
+ {
+ "epoch": 1.7194836047638398,
+ "grad_norm": 0.5390254259109497,
+ "learning_rate": 7.709143990016702e-05,
+ "loss": 0.8409,
+ "step": 1719
+ },
+ {
+ "epoch": 1.7204838860929637,
+ "grad_norm": 0.4631121754646301,
+ "learning_rate": 7.698941560077899e-05,
+ "loss": 0.704,
+ "step": 1720
+ },
+ {
+ "epoch": 1.7214841674220875,
+ "grad_norm": 0.5231932997703552,
+ "learning_rate": 7.688741658586178e-05,
+ "loss": 1.0912,
+ "step": 1721
+ },
+ {
+ "epoch": 1.7224844487512114,
+ "grad_norm": 0.4563293755054474,
+ "learning_rate": 7.678544296749384e-05,
+ "loss": 0.8444,
+ "step": 1722
+ },
+ {
+ "epoch": 1.723484730080335,
+ "grad_norm": 0.4844750463962555,
+ "learning_rate": 7.668349485772572e-05,
+ "loss": 0.9234,
+ "step": 1723
+ },
+ {
+ "epoch": 1.7244850114094588,
+ "grad_norm": 0.45698872208595276,
+ "learning_rate": 7.658157236857999e-05,
+ "loss": 0.8608,
+ "step": 1724
+ },
+ {
+ "epoch": 1.7254852927385826,
+ "grad_norm": 0.46694663166999817,
+ "learning_rate": 7.6479675612051e-05,
+ "loss": 0.9628,
+ "step": 1725
+ },
+ {
+ "epoch": 1.7264855740677065,
+ "grad_norm": 0.46077099442481995,
+ "learning_rate": 7.637780470010487e-05,
+ "loss": 0.8173,
+ "step": 1726
+ },
+ {
+ "epoch": 1.7274858553968304,
+ "grad_norm": 0.5198522210121155,
+ "learning_rate": 7.62759597446793e-05,
+ "loss": 0.8813,
+ "step": 1727
+ },
+ {
+ "epoch": 1.7284861367259543,
+ "grad_norm": 0.48385483026504517,
+ "learning_rate": 7.617414085768351e-05,
+ "loss": 0.7007,
+ "step": 1728
+ },
+ {
+ "epoch": 1.729486418055078,
+ "grad_norm": 0.5622795224189758,
+ "learning_rate": 7.607234815099802e-05,
+ "loss": 1.0422,
+ "step": 1729
+ },
+ {
+ "epoch": 1.7304866993842019,
+ "grad_norm": 0.5077874660491943,
+ "learning_rate": 7.597058173647458e-05,
+ "loss": 1.014,
+ "step": 1730
+ },
+ {
+ "epoch": 1.7314869807133255,
+ "grad_norm": 0.598760724067688,
+ "learning_rate": 7.586884172593609e-05,
+ "loss": 0.8979,
+ "step": 1731
+ },
+ {
+ "epoch": 1.7324872620424494,
+ "grad_norm": 0.6116266846656799,
+ "learning_rate": 7.576712823117645e-05,
+ "loss": 0.9121,
+ "step": 1732
+ },
+ {
+ "epoch": 1.7334875433715733,
+ "grad_norm": 0.6157407164573669,
+ "learning_rate": 7.566544136396037e-05,
+ "loss": 0.9361,
+ "step": 1733
+ },
+ {
+ "epoch": 1.7344878247006972,
+ "grad_norm": 0.5174565315246582,
+ "learning_rate": 7.556378123602334e-05,
+ "loss": 1.1858,
+ "step": 1734
+ },
+ {
+ "epoch": 1.7354881060298208,
+ "grad_norm": 0.42541515827178955,
+ "learning_rate": 7.54621479590714e-05,
+ "loss": 0.7425,
+ "step": 1735
+ },
+ {
+ "epoch": 1.7364883873589447,
+ "grad_norm": 0.49402132630348206,
+ "learning_rate": 7.536054164478123e-05,
+ "loss": 0.8158,
+ "step": 1736
+ },
+ {
+ "epoch": 1.7374886686880684,
+ "grad_norm": 0.4637628197669983,
+ "learning_rate": 7.525896240479976e-05,
+ "loss": 0.7859,
+ "step": 1737
+ },
+ {
+ "epoch": 1.7384889500171923,
+ "grad_norm": 0.5475689172744751,
+ "learning_rate": 7.51574103507442e-05,
+ "loss": 0.825,
+ "step": 1738
+ },
+ {
+ "epoch": 1.7394892313463162,
+ "grad_norm": 0.5652226209640503,
+ "learning_rate": 7.505588559420189e-05,
+ "loss": 0.9051,
+ "step": 1739
+ },
+ {
+ "epoch": 1.74048951267544,
+ "grad_norm": 0.4930717647075653,
+ "learning_rate": 7.495438824673016e-05,
+ "loss": 0.7797,
+ "step": 1740
+ },
+ {
+ "epoch": 1.741489794004564,
+ "grad_norm": 0.4611824154853821,
+ "learning_rate": 7.485291841985626e-05,
+ "loss": 1.014,
+ "step": 1741
+ },
+ {
+ "epoch": 1.7424900753336876,
+ "grad_norm": 0.4652807414531708,
+ "learning_rate": 7.475147622507717e-05,
+ "loss": 0.7601,
+ "step": 1742
+ },
+ {
+ "epoch": 1.7434903566628113,
+ "grad_norm": 0.5227355360984802,
+ "learning_rate": 7.465006177385953e-05,
+ "loss": 0.8616,
+ "step": 1743
+ },
+ {
+ "epoch": 1.7444906379919352,
+ "grad_norm": 0.42283377051353455,
+ "learning_rate": 7.454867517763948e-05,
+ "loss": 0.8647,
+ "step": 1744
+ },
+ {
+ "epoch": 1.745490919321059,
+ "grad_norm": 0.45151621103286743,
+ "learning_rate": 7.444731654782253e-05,
+ "loss": 0.8619,
+ "step": 1745
+ },
+ {
+ "epoch": 1.746491200650183,
+ "grad_norm": 0.6146779656410217,
+ "learning_rate": 7.434598599578351e-05,
+ "loss": 0.9479,
+ "step": 1746
+ },
+ {
+ "epoch": 1.7474914819793068,
+ "grad_norm": 0.4988139271736145,
+ "learning_rate": 7.424468363286634e-05,
+ "loss": 0.9136,
+ "step": 1747
+ },
+ {
+ "epoch": 1.7484917633084305,
+ "grad_norm": 0.5271700024604797,
+ "learning_rate": 7.414340957038406e-05,
+ "loss": 1.0416,
+ "step": 1748
+ },
+ {
+ "epoch": 1.7494920446375544,
+ "grad_norm": 0.46806615591049194,
+ "learning_rate": 7.404216391961847e-05,
+ "loss": 0.8376,
+ "step": 1749
+ },
+ {
+ "epoch": 1.750492325966678,
+ "grad_norm": 0.4781439006328583,
+ "learning_rate": 7.394094679182024e-05,
+ "loss": 0.9669,
+ "step": 1750
+ },
+ {
+ "epoch": 1.751492607295802,
+ "grad_norm": 0.49085667729377747,
+ "learning_rate": 7.383975829820874e-05,
+ "loss": 0.9279,
+ "step": 1751
+ },
+ {
+ "epoch": 1.7524928886249258,
+ "grad_norm": 0.4937964379787445,
+ "learning_rate": 7.37385985499718e-05,
+ "loss": 1.1126,
+ "step": 1752
+ },
+ {
+ "epoch": 1.7534931699540497,
+ "grad_norm": 0.3883766233921051,
+ "learning_rate": 7.36374676582657e-05,
+ "loss": 0.7398,
+ "step": 1753
+ },
+ {
+ "epoch": 1.7544934512831734,
+ "grad_norm": 0.4864053726196289,
+ "learning_rate": 7.353636573421496e-05,
+ "loss": 0.8172,
+ "step": 1754
+ },
+ {
+ "epoch": 1.7554937326122972,
+ "grad_norm": 0.48342639207839966,
+ "learning_rate": 7.343529288891239e-05,
+ "loss": 0.8957,
+ "step": 1755
+ },
+ {
+ "epoch": 1.756494013941421,
+ "grad_norm": 0.47928398847579956,
+ "learning_rate": 7.333424923341868e-05,
+ "loss": 0.8414,
+ "step": 1756
+ },
+ {
+ "epoch": 1.7574942952705448,
+ "grad_norm": 0.46736687421798706,
+ "learning_rate": 7.323323487876257e-05,
+ "loss": 0.7661,
+ "step": 1757
+ },
+ {
+ "epoch": 1.7584945765996687,
+ "grad_norm": 0.5184097290039062,
+ "learning_rate": 7.313224993594057e-05,
+ "loss": 0.8719,
+ "step": 1758
+ },
+ {
+ "epoch": 1.7594948579287926,
+ "grad_norm": 0.526541531085968,
+ "learning_rate": 7.303129451591686e-05,
+ "loss": 0.8801,
+ "step": 1759
+ },
+ {
+ "epoch": 1.7604951392579165,
+ "grad_norm": 0.5191768407821655,
+ "learning_rate": 7.29303687296232e-05,
+ "loss": 0.9343,
+ "step": 1760
+ },
+ {
+ "epoch": 1.7614954205870401,
+ "grad_norm": 0.5041552186012268,
+ "learning_rate": 7.282947268795877e-05,
+ "loss": 0.9369,
+ "step": 1761
+ },
+ {
+ "epoch": 1.7624957019161638,
+ "grad_norm": 0.4530990719795227,
+ "learning_rate": 7.272860650179006e-05,
+ "loss": 0.9629,
+ "step": 1762
+ },
+ {
+ "epoch": 1.7634959832452877,
+ "grad_norm": 0.42898643016815186,
+ "learning_rate": 7.262777028195081e-05,
+ "loss": 0.7658,
+ "step": 1763
+ },
+ {
+ "epoch": 1.7644962645744116,
+ "grad_norm": 0.4350574314594269,
+ "learning_rate": 7.252696413924174e-05,
+ "loss": 0.7273,
+ "step": 1764
+ },
+ {
+ "epoch": 1.7654965459035354,
+ "grad_norm": 0.517660915851593,
+ "learning_rate": 7.242618818443056e-05,
+ "loss": 0.9021,
+ "step": 1765
+ },
+ {
+ "epoch": 1.7664968272326593,
+ "grad_norm": 0.5530719757080078,
+ "learning_rate": 7.232544252825189e-05,
+ "loss": 0.8532,
+ "step": 1766
+ },
+ {
+ "epoch": 1.767497108561783,
+ "grad_norm": 0.41731134057044983,
+ "learning_rate": 7.222472728140695e-05,
+ "loss": 0.6834,
+ "step": 1767
+ },
+ {
+ "epoch": 1.7684973898909067,
+ "grad_norm": 0.4782492518424988,
+ "learning_rate": 7.212404255456357e-05,
+ "loss": 0.8692,
+ "step": 1768
+ },
+ {
+ "epoch": 1.7694976712200305,
+ "grad_norm": 0.5327005386352539,
+ "learning_rate": 7.202338845835606e-05,
+ "loss": 0.92,
+ "step": 1769
+ },
+ {
+ "epoch": 1.7704979525491544,
+ "grad_norm": 0.48882028460502625,
+ "learning_rate": 7.192276510338507e-05,
+ "loss": 0.8545,
+ "step": 1770
+ },
+ {
+ "epoch": 1.7714982338782783,
+ "grad_norm": 0.5156509280204773,
+ "learning_rate": 7.182217260021749e-05,
+ "loss": 0.9533,
+ "step": 1771
+ },
+ {
+ "epoch": 1.7724985152074022,
+ "grad_norm": 0.49955782294273376,
+ "learning_rate": 7.172161105938624e-05,
+ "loss": 0.7701,
+ "step": 1772
+ },
+ {
+ "epoch": 1.7734987965365259,
+ "grad_norm": 0.4707096219062805,
+ "learning_rate": 7.162108059139032e-05,
+ "loss": 0.9093,
+ "step": 1773
+ },
+ {
+ "epoch": 1.7744990778656498,
+ "grad_norm": 0.5026343464851379,
+ "learning_rate": 7.15205813066945e-05,
+ "loss": 1.0551,
+ "step": 1774
+ },
+ {
+ "epoch": 1.7754993591947734,
+ "grad_norm": 0.4696865975856781,
+ "learning_rate": 7.142011331572936e-05,
+ "loss": 0.8701,
+ "step": 1775
+ },
+ {
+ "epoch": 1.7764996405238973,
+ "grad_norm": 0.4939334988594055,
+ "learning_rate": 7.131967672889101e-05,
+ "loss": 0.9638,
+ "step": 1776
+ },
+ {
+ "epoch": 1.7774999218530212,
+ "grad_norm": 0.4661426842212677,
+ "learning_rate": 7.121927165654109e-05,
+ "loss": 0.8687,
+ "step": 1777
+ },
+ {
+ "epoch": 1.778500203182145,
+ "grad_norm": 0.48258379101753235,
+ "learning_rate": 7.111889820900664e-05,
+ "loss": 0.9335,
+ "step": 1778
+ },
+ {
+ "epoch": 1.7795004845112687,
+ "grad_norm": 0.4910578727722168,
+ "learning_rate": 7.101855649657991e-05,
+ "loss": 0.9632,
+ "step": 1779
+ },
+ {
+ "epoch": 1.7805007658403926,
+ "grad_norm": 0.46052396297454834,
+ "learning_rate": 7.091824662951827e-05,
+ "loss": 0.7958,
+ "step": 1780
+ },
+ {
+ "epoch": 1.7815010471695163,
+ "grad_norm": 0.4766314625740051,
+ "learning_rate": 7.08179687180442e-05,
+ "loss": 0.7427,
+ "step": 1781
+ },
+ {
+ "epoch": 1.7825013284986402,
+ "grad_norm": 0.4556989371776581,
+ "learning_rate": 7.071772287234497e-05,
+ "loss": 0.7899,
+ "step": 1782
+ },
+ {
+ "epoch": 1.783501609827764,
+ "grad_norm": 0.5186169743537903,
+ "learning_rate": 7.06175092025726e-05,
+ "loss": 0.9758,
+ "step": 1783
+ },
+ {
+ "epoch": 1.784501891156888,
+ "grad_norm": 0.5379285216331482,
+ "learning_rate": 7.051732781884378e-05,
+ "loss": 0.8966,
+ "step": 1784
+ },
+ {
+ "epoch": 1.7855021724860118,
+ "grad_norm": 0.520286500453949,
+ "learning_rate": 7.041717883123977e-05,
+ "loss": 0.9421,
+ "step": 1785
+ },
+ {
+ "epoch": 1.7865024538151355,
+ "grad_norm": 0.5489597916603088,
+ "learning_rate": 7.031706234980617e-05,
+ "loss": 0.936,
+ "step": 1786
+ },
+ {
+ "epoch": 1.7875027351442592,
+ "grad_norm": 0.5182730555534363,
+ "learning_rate": 7.021697848455291e-05,
+ "loss": 0.953,
+ "step": 1787
+ },
+ {
+ "epoch": 1.788503016473383,
+ "grad_norm": 0.5181865692138672,
+ "learning_rate": 7.011692734545403e-05,
+ "loss": 0.8688,
+ "step": 1788
+ },
+ {
+ "epoch": 1.789503297802507,
+ "grad_norm": 0.44486725330352783,
+ "learning_rate": 7.001690904244767e-05,
+ "loss": 0.8014,
+ "step": 1789
+ },
+ {
+ "epoch": 1.7905035791316308,
+ "grad_norm": 0.5337903499603271,
+ "learning_rate": 6.991692368543584e-05,
+ "loss": 0.9003,
+ "step": 1790
+ },
+ {
+ "epoch": 1.7915038604607547,
+ "grad_norm": 0.5147045254707336,
+ "learning_rate": 6.981697138428434e-05,
+ "loss": 0.9162,
+ "step": 1791
+ },
+ {
+ "epoch": 1.7925041417898784,
+ "grad_norm": 0.5204777121543884,
+ "learning_rate": 6.971705224882271e-05,
+ "loss": 0.8938,
+ "step": 1792
+ },
+ {
+ "epoch": 1.7935044231190023,
+ "grad_norm": 0.45608311891555786,
+ "learning_rate": 6.9617166388844e-05,
+ "loss": 0.7545,
+ "step": 1793
+ },
+ {
+ "epoch": 1.794504704448126,
+ "grad_norm": 0.47650712728500366,
+ "learning_rate": 6.951731391410468e-05,
+ "loss": 0.8237,
+ "step": 1794
+ },
+ {
+ "epoch": 1.7955049857772498,
+ "grad_norm": 0.5793735384941101,
+ "learning_rate": 6.94174949343246e-05,
+ "loss": 1.1272,
+ "step": 1795
+ },
+ {
+ "epoch": 1.7965052671063737,
+ "grad_norm": 0.4923813045024872,
+ "learning_rate": 6.931770955918674e-05,
+ "loss": 1.0535,
+ "step": 1796
+ },
+ {
+ "epoch": 1.7975055484354976,
+ "grad_norm": 0.515476405620575,
+ "learning_rate": 6.921795789833723e-05,
+ "loss": 0.986,
+ "step": 1797
+ },
+ {
+ "epoch": 1.7985058297646213,
+ "grad_norm": 0.4697955250740051,
+ "learning_rate": 6.911824006138503e-05,
+ "loss": 0.8236,
+ "step": 1798
+ },
+ {
+ "epoch": 1.7995061110937451,
+ "grad_norm": 0.48255470395088196,
+ "learning_rate": 6.901855615790206e-05,
+ "loss": 0.8308,
+ "step": 1799
+ },
+ {
+ "epoch": 1.8005063924228688,
+ "grad_norm": 0.5010727047920227,
+ "learning_rate": 6.891890629742288e-05,
+ "loss": 0.9338,
+ "step": 1800
+ },
+ {
+ "epoch": 1.8015066737519927,
+ "grad_norm": 0.5230937004089355,
+ "learning_rate": 6.88192905894447e-05,
+ "loss": 0.7253,
+ "step": 1801
+ },
+ {
+ "epoch": 1.8025069550811166,
+ "grad_norm": 0.4493248164653778,
+ "learning_rate": 6.871970914342712e-05,
+ "loss": 0.9527,
+ "step": 1802
+ },
+ {
+ "epoch": 1.8035072364102405,
+ "grad_norm": 0.4727008044719696,
+ "learning_rate": 6.862016206879216e-05,
+ "loss": 0.9527,
+ "step": 1803
+ },
+ {
+ "epoch": 1.8045075177393644,
+ "grad_norm": 0.457698255777359,
+ "learning_rate": 6.852064947492405e-05,
+ "loss": 0.8424,
+ "step": 1804
+ },
+ {
+ "epoch": 1.805507799068488,
+ "grad_norm": 0.6156003475189209,
+ "learning_rate": 6.842117147116913e-05,
+ "loss": 0.9988,
+ "step": 1805
+ },
+ {
+ "epoch": 1.8065080803976117,
+ "grad_norm": 0.5174852609634399,
+ "learning_rate": 6.832172816683575e-05,
+ "loss": 0.8635,
+ "step": 1806
+ },
+ {
+ "epoch": 1.8075083617267356,
+ "grad_norm": 0.5165886878967285,
+ "learning_rate": 6.82223196711941e-05,
+ "loss": 0.8721,
+ "step": 1807
+ },
+ {
+ "epoch": 1.8085086430558595,
+ "grad_norm": 0.4866868853569031,
+ "learning_rate": 6.812294609347615e-05,
+ "loss": 0.8819,
+ "step": 1808
+ },
+ {
+ "epoch": 1.8095089243849833,
+ "grad_norm": 0.4991300404071808,
+ "learning_rate": 6.802360754287547e-05,
+ "loss": 0.8644,
+ "step": 1809
+ },
+ {
+ "epoch": 1.8105092057141072,
+ "grad_norm": 0.501853346824646,
+ "learning_rate": 6.79243041285472e-05,
+ "loss": 0.824,
+ "step": 1810
+ },
+ {
+ "epoch": 1.811509487043231,
+ "grad_norm": 0.5272979140281677,
+ "learning_rate": 6.782503595960782e-05,
+ "loss": 1.0178,
+ "step": 1811
+ },
+ {
+ "epoch": 1.8125097683723548,
+ "grad_norm": 0.5986105799674988,
+ "learning_rate": 6.772580314513508e-05,
+ "loss": 0.949,
+ "step": 1812
+ },
+ {
+ "epoch": 1.8135100497014784,
+ "grad_norm": 0.5391054153442383,
+ "learning_rate": 6.762660579416791e-05,
+ "loss": 1.0698,
+ "step": 1813
+ },
+ {
+ "epoch": 1.8145103310306023,
+ "grad_norm": 0.48486262559890747,
+ "learning_rate": 6.752744401570625e-05,
+ "loss": 0.9986,
+ "step": 1814
+ },
+ {
+ "epoch": 1.8155106123597262,
+ "grad_norm": 0.5090842843055725,
+ "learning_rate": 6.742831791871096e-05,
+ "loss": 0.8459,
+ "step": 1815
+ },
+ {
+ "epoch": 1.81651089368885,
+ "grad_norm": 0.408403605222702,
+ "learning_rate": 6.732922761210369e-05,
+ "loss": 0.7093,
+ "step": 1816
+ },
+ {
+ "epoch": 1.8175111750179738,
+ "grad_norm": 0.5082786083221436,
+ "learning_rate": 6.723017320476679e-05,
+ "loss": 0.8289,
+ "step": 1817
+ },
+ {
+ "epoch": 1.8185114563470977,
+ "grad_norm": 0.4834018647670746,
+ "learning_rate": 6.713115480554313e-05,
+ "loss": 0.9766,
+ "step": 1818
+ },
+ {
+ "epoch": 1.8195117376762213,
+ "grad_norm": 0.5373227596282959,
+ "learning_rate": 6.7032172523236e-05,
+ "loss": 1.0396,
+ "step": 1819
+ },
+ {
+ "epoch": 1.8205120190053452,
+ "grad_norm": 0.49561604857444763,
+ "learning_rate": 6.693322646660906e-05,
+ "loss": 0.9774,
+ "step": 1820
+ },
+ {
+ "epoch": 1.821512300334469,
+ "grad_norm": 0.47309985756874084,
+ "learning_rate": 6.683431674438612e-05,
+ "loss": 0.8266,
+ "step": 1821
+ },
+ {
+ "epoch": 1.822512581663593,
+ "grad_norm": 0.5706244707107544,
+ "learning_rate": 6.673544346525107e-05,
+ "loss": 1.027,
+ "step": 1822
+ },
+ {
+ "epoch": 1.8235128629927169,
+ "grad_norm": 0.5383077263832092,
+ "learning_rate": 6.663660673784777e-05,
+ "loss": 1.0545,
+ "step": 1823
+ },
+ {
+ "epoch": 1.8245131443218405,
+ "grad_norm": 0.5760438442230225,
+ "learning_rate": 6.653780667077985e-05,
+ "loss": 0.8955,
+ "step": 1824
+ },
+ {
+ "epoch": 1.8255134256509642,
+ "grad_norm": 0.45533323287963867,
+ "learning_rate": 6.643904337261082e-05,
+ "loss": 0.9149,
+ "step": 1825
+ },
+ {
+ "epoch": 1.826513706980088,
+ "grad_norm": 0.43935853242874146,
+ "learning_rate": 6.634031695186362e-05,
+ "loss": 0.8231,
+ "step": 1826
+ },
+ {
+ "epoch": 1.827513988309212,
+ "grad_norm": 0.4752298593521118,
+ "learning_rate": 6.624162751702076e-05,
+ "loss": 0.7823,
+ "step": 1827
+ },
+ {
+ "epoch": 1.8285142696383359,
+ "grad_norm": 0.5012879371643066,
+ "learning_rate": 6.614297517652409e-05,
+ "loss": 0.9586,
+ "step": 1828
+ },
+ {
+ "epoch": 1.8295145509674597,
+ "grad_norm": 0.4421415328979492,
+ "learning_rate": 6.604436003877464e-05,
+ "loss": 0.7812,
+ "step": 1829
+ },
+ {
+ "epoch": 1.8305148322965834,
+ "grad_norm": 0.5347071290016174,
+ "learning_rate": 6.594578221213265e-05,
+ "loss": 0.8906,
+ "step": 1830
+ },
+ {
+ "epoch": 1.831515113625707,
+ "grad_norm": 0.5177352428436279,
+ "learning_rate": 6.58472418049173e-05,
+ "loss": 1.0046,
+ "step": 1831
+ },
+ {
+ "epoch": 1.832515394954831,
+ "grad_norm": 0.5403003096580505,
+ "learning_rate": 6.574873892540671e-05,
+ "loss": 0.9588,
+ "step": 1832
+ },
+ {
+ "epoch": 1.8335156762839548,
+ "grad_norm": 0.5138882994651794,
+ "learning_rate": 6.565027368183769e-05,
+ "loss": 0.9824,
+ "step": 1833
+ },
+ {
+ "epoch": 1.8345159576130787,
+ "grad_norm": 0.4976009726524353,
+ "learning_rate": 6.555184618240577e-05,
+ "loss": 0.9454,
+ "step": 1834
+ },
+ {
+ "epoch": 1.8355162389422026,
+ "grad_norm": 0.5282961130142212,
+ "learning_rate": 6.545345653526495e-05,
+ "loss": 1.0134,
+ "step": 1835
+ },
+ {
+ "epoch": 1.8365165202713263,
+ "grad_norm": 0.47592097520828247,
+ "learning_rate": 6.535510484852767e-05,
+ "loss": 0.9658,
+ "step": 1836
+ },
+ {
+ "epoch": 1.8375168016004502,
+ "grad_norm": 0.5012205839157104,
+ "learning_rate": 6.525679123026463e-05,
+ "loss": 0.8937,
+ "step": 1837
+ },
+ {
+ "epoch": 1.8385170829295738,
+ "grad_norm": 0.47777363657951355,
+ "learning_rate": 6.515851578850474e-05,
+ "loss": 0.9862,
+ "step": 1838
+ },
+ {
+ "epoch": 1.8395173642586977,
+ "grad_norm": 0.4610724449157715,
+ "learning_rate": 6.506027863123492e-05,
+ "loss": 0.9208,
+ "step": 1839
+ },
+ {
+ "epoch": 1.8405176455878216,
+ "grad_norm": 0.5747025609016418,
+ "learning_rate": 6.496207986640004e-05,
+ "loss": 0.9366,
+ "step": 1840
+ },
+ {
+ "epoch": 1.8415179269169455,
+ "grad_norm": 0.48486635088920593,
+ "learning_rate": 6.48639196019028e-05,
+ "loss": 0.7989,
+ "step": 1841
+ },
+ {
+ "epoch": 1.8425182082460692,
+ "grad_norm": 0.4930958151817322,
+ "learning_rate": 6.476579794560356e-05,
+ "loss": 0.7846,
+ "step": 1842
+ },
+ {
+ "epoch": 1.843518489575193,
+ "grad_norm": 0.5363168120384216,
+ "learning_rate": 6.46677150053203e-05,
+ "loss": 0.9519,
+ "step": 1843
+ },
+ {
+ "epoch": 1.8445187709043167,
+ "grad_norm": 0.39670878648757935,
+ "learning_rate": 6.45696708888284e-05,
+ "loss": 0.8245,
+ "step": 1844
+ },
+ {
+ "epoch": 1.8455190522334406,
+ "grad_norm": 0.5151652693748474,
+ "learning_rate": 6.447166570386063e-05,
+ "loss": 0.9517,
+ "step": 1845
+ },
+ {
+ "epoch": 1.8465193335625645,
+ "grad_norm": 0.47063514590263367,
+ "learning_rate": 6.437369955810699e-05,
+ "loss": 0.8235,
+ "step": 1846
+ },
+ {
+ "epoch": 1.8475196148916884,
+ "grad_norm": 0.5120642185211182,
+ "learning_rate": 6.42757725592145e-05,
+ "loss": 1.1862,
+ "step": 1847
+ },
+ {
+ "epoch": 1.8485198962208123,
+ "grad_norm": 0.491312175989151,
+ "learning_rate": 6.417788481478728e-05,
+ "loss": 0.8375,
+ "step": 1848
+ },
+ {
+ "epoch": 1.849520177549936,
+ "grad_norm": 0.5049518346786499,
+ "learning_rate": 6.40800364323862e-05,
+ "loss": 0.8419,
+ "step": 1849
+ },
+ {
+ "epoch": 1.8505204588790596,
+ "grad_norm": 0.4442373514175415,
+ "learning_rate": 6.398222751952899e-05,
+ "loss": 0.8519,
+ "step": 1850
+ },
+ {
+ "epoch": 1.8515207402081835,
+ "grad_norm": 0.5219951868057251,
+ "learning_rate": 6.388445818368991e-05,
+ "loss": 0.8146,
+ "step": 1851
+ },
+ {
+ "epoch": 1.8525210215373074,
+ "grad_norm": 0.5035893321037292,
+ "learning_rate": 6.378672853229981e-05,
+ "loss": 0.908,
+ "step": 1852
+ },
+ {
+ "epoch": 1.8535213028664312,
+ "grad_norm": 0.4742502272129059,
+ "learning_rate": 6.368903867274585e-05,
+ "loss": 0.9503,
+ "step": 1853
+ },
+ {
+ "epoch": 1.8545215841955551,
+ "grad_norm": 0.504763126373291,
+ "learning_rate": 6.35913887123716e-05,
+ "loss": 0.8487,
+ "step": 1854
+ },
+ {
+ "epoch": 1.8555218655246788,
+ "grad_norm": 0.5125763416290283,
+ "learning_rate": 6.34937787584767e-05,
+ "loss": 0.9596,
+ "step": 1855
+ },
+ {
+ "epoch": 1.8565221468538027,
+ "grad_norm": 1.090164065361023,
+ "learning_rate": 6.339620891831678e-05,
+ "loss": 0.8088,
+ "step": 1856
+ },
+ {
+ "epoch": 1.8575224281829263,
+ "grad_norm": 0.4670305550098419,
+ "learning_rate": 6.329867929910347e-05,
+ "loss": 0.9748,
+ "step": 1857
+ },
+ {
+ "epoch": 1.8585227095120502,
+ "grad_norm": 0.49796226620674133,
+ "learning_rate": 6.32011900080042e-05,
+ "loss": 0.7566,
+ "step": 1858
+ },
+ {
+ "epoch": 1.8595229908411741,
+ "grad_norm": 0.5040385723114014,
+ "learning_rate": 6.310374115214204e-05,
+ "loss": 0.8959,
+ "step": 1859
+ },
+ {
+ "epoch": 1.860523272170298,
+ "grad_norm": 0.5290741920471191,
+ "learning_rate": 6.30063328385957e-05,
+ "loss": 0.9035,
+ "step": 1860
+ },
+ {
+ "epoch": 1.8615235534994217,
+ "grad_norm": 0.5860772728919983,
+ "learning_rate": 6.290896517439925e-05,
+ "loss": 1.0858,
+ "step": 1861
+ },
+ {
+ "epoch": 1.8625238348285456,
+ "grad_norm": 0.4714392423629761,
+ "learning_rate": 6.281163826654218e-05,
+ "loss": 0.9652,
+ "step": 1862
+ },
+ {
+ "epoch": 1.8635241161576692,
+ "grad_norm": 0.4995323717594147,
+ "learning_rate": 6.271435222196916e-05,
+ "loss": 0.9808,
+ "step": 1863
+ },
+ {
+ "epoch": 1.864524397486793,
+ "grad_norm": 0.5379069447517395,
+ "learning_rate": 6.261710714757994e-05,
+ "loss": 0.9893,
+ "step": 1864
+ },
+ {
+ "epoch": 1.865524678815917,
+ "grad_norm": 0.5350576043128967,
+ "learning_rate": 6.251990315022927e-05,
+ "loss": 1.1355,
+ "step": 1865
+ },
+ {
+ "epoch": 1.8665249601450409,
+ "grad_norm": 0.5047613382339478,
+ "learning_rate": 6.24227403367268e-05,
+ "loss": 1.0258,
+ "step": 1866
+ },
+ {
+ "epoch": 1.8675252414741648,
+ "grad_norm": 0.5068250894546509,
+ "learning_rate": 6.232561881383687e-05,
+ "loss": 1.0832,
+ "step": 1867
+ },
+ {
+ "epoch": 1.8685255228032884,
+ "grad_norm": 0.5315554738044739,
+ "learning_rate": 6.222853868827839e-05,
+ "loss": 0.8883,
+ "step": 1868
+ },
+ {
+ "epoch": 1.869525804132412,
+ "grad_norm": 0.47088900208473206,
+ "learning_rate": 6.213150006672499e-05,
+ "loss": 1.0515,
+ "step": 1869
+ },
+ {
+ "epoch": 1.870526085461536,
+ "grad_norm": 0.450911283493042,
+ "learning_rate": 6.20345030558045e-05,
+ "loss": 0.8933,
+ "step": 1870
+ },
+ {
+ "epoch": 1.8715263667906599,
+ "grad_norm": 0.5612505674362183,
+ "learning_rate": 6.193754776209911e-05,
+ "loss": 0.8822,
+ "step": 1871
+ },
+ {
+ "epoch": 1.8725266481197838,
+ "grad_norm": 0.6027489900588989,
+ "learning_rate": 6.184063429214515e-05,
+ "loss": 0.849,
+ "step": 1872
+ },
+ {
+ "epoch": 1.8735269294489076,
+ "grad_norm": 0.5527409315109253,
+ "learning_rate": 6.174376275243299e-05,
+ "loss": 0.9841,
+ "step": 1873
+ },
+ {
+ "epoch": 1.8745272107780313,
+ "grad_norm": 0.4340353310108185,
+ "learning_rate": 6.164693324940694e-05,
+ "loss": 0.7171,
+ "step": 1874
+ },
+ {
+ "epoch": 1.8755274921071552,
+ "grad_norm": 0.46394628286361694,
+ "learning_rate": 6.15501458894651e-05,
+ "loss": 0.8439,
+ "step": 1875
+ },
+ {
+ "epoch": 1.8765277734362789,
+ "grad_norm": 0.46649280190467834,
+ "learning_rate": 6.145340077895929e-05,
+ "loss": 0.9665,
+ "step": 1876
+ },
+ {
+ "epoch": 1.8775280547654027,
+ "grad_norm": 0.45841577649116516,
+ "learning_rate": 6.135669802419488e-05,
+ "loss": 0.8537,
+ "step": 1877
+ },
+ {
+ "epoch": 1.8785283360945266,
+ "grad_norm": 0.46763482689857483,
+ "learning_rate": 6.126003773143072e-05,
+ "loss": 0.8314,
+ "step": 1878
+ },
+ {
+ "epoch": 1.8795286174236505,
+ "grad_norm": 0.47747811675071716,
+ "learning_rate": 6.116342000687896e-05,
+ "loss": 0.9612,
+ "step": 1879
+ },
+ {
+ "epoch": 1.8805288987527742,
+ "grad_norm": 0.5145304203033447,
+ "learning_rate": 6.106684495670506e-05,
+ "loss": 0.9628,
+ "step": 1880
+ },
+ {
+ "epoch": 1.881529180081898,
+ "grad_norm": 0.4443700313568115,
+ "learning_rate": 6.097031268702746e-05,
+ "loss": 0.7482,
+ "step": 1881
+ },
+ {
+ "epoch": 1.8825294614110217,
+ "grad_norm": 0.4143758714199066,
+ "learning_rate": 6.087382330391774e-05,
+ "loss": 0.6993,
+ "step": 1882
+ },
+ {
+ "epoch": 1.8835297427401456,
+ "grad_norm": 0.5006669759750366,
+ "learning_rate": 6.077737691340023e-05,
+ "loss": 0.8514,
+ "step": 1883
+ },
+ {
+ "epoch": 1.8845300240692695,
+ "grad_norm": 0.48067471385002136,
+ "learning_rate": 6.0680973621452105e-05,
+ "loss": 0.8319,
+ "step": 1884
+ },
+ {
+ "epoch": 1.8855303053983934,
+ "grad_norm": 0.47147560119628906,
+ "learning_rate": 6.0584613534003144e-05,
+ "loss": 0.9822,
+ "step": 1885
+ },
+ {
+ "epoch": 1.8865305867275173,
+ "grad_norm": 0.48229023814201355,
+ "learning_rate": 6.0488296756935636e-05,
+ "loss": 0.8972,
+ "step": 1886
+ },
+ {
+ "epoch": 1.887530868056641,
+ "grad_norm": 0.487932026386261,
+ "learning_rate": 6.039202339608432e-05,
+ "loss": 0.8976,
+ "step": 1887
+ },
+ {
+ "epoch": 1.8885311493857646,
+ "grad_norm": 0.49332642555236816,
+ "learning_rate": 6.0295793557236203e-05,
+ "loss": 0.8734,
+ "step": 1888
+ },
+ {
+ "epoch": 1.8895314307148885,
+ "grad_norm": 0.4834604263305664,
+ "learning_rate": 6.019960734613047e-05,
+ "loss": 0.8414,
+ "step": 1889
+ },
+ {
+ "epoch": 1.8905317120440124,
+ "grad_norm": 0.46540340781211853,
+ "learning_rate": 6.010346486845837e-05,
+ "loss": 0.848,
+ "step": 1890
+ },
+ {
+ "epoch": 1.8915319933731363,
+ "grad_norm": 0.4350258409976959,
+ "learning_rate": 6.0007366229863117e-05,
+ "loss": 0.8143,
+ "step": 1891
+ },
+ {
+ "epoch": 1.8925322747022602,
+ "grad_norm": 0.4675842821598053,
+ "learning_rate": 5.991131153593971e-05,
+ "loss": 0.8136,
+ "step": 1892
+ },
+ {
+ "epoch": 1.8935325560313838,
+ "grad_norm": 0.560526430606842,
+ "learning_rate": 5.981530089223489e-05,
+ "loss": 1.097,
+ "step": 1893
+ },
+ {
+ "epoch": 1.8945328373605075,
+ "grad_norm": 0.48588961362838745,
+ "learning_rate": 5.971933440424703e-05,
+ "loss": 0.8046,
+ "step": 1894
+ },
+ {
+ "epoch": 1.8955331186896314,
+ "grad_norm": 0.47677376866340637,
+ "learning_rate": 5.9623412177425886e-05,
+ "loss": 0.8202,
+ "step": 1895
+ },
+ {
+ "epoch": 1.8965334000187553,
+ "grad_norm": 0.49400967359542847,
+ "learning_rate": 5.952753431717268e-05,
+ "loss": 0.8114,
+ "step": 1896
+ },
+ {
+ "epoch": 1.8975336813478791,
+ "grad_norm": 0.4729720652103424,
+ "learning_rate": 5.9431700928839805e-05,
+ "loss": 0.7848,
+ "step": 1897
+ },
+ {
+ "epoch": 1.898533962677003,
+ "grad_norm": 0.4910169541835785,
+ "learning_rate": 5.933591211773082e-05,
+ "loss": 0.8424,
+ "step": 1898
+ },
+ {
+ "epoch": 1.8995342440061267,
+ "grad_norm": 0.4618901014328003,
+ "learning_rate": 5.924016798910037e-05,
+ "loss": 0.8423,
+ "step": 1899
+ },
+ {
+ "epoch": 1.9005345253352506,
+ "grad_norm": 0.562754213809967,
+ "learning_rate": 5.914446864815388e-05,
+ "loss": 0.8016,
+ "step": 1900
+ },
+ {
+ "epoch": 1.9015348066643742,
+ "grad_norm": 0.479568749666214,
+ "learning_rate": 5.9048814200047675e-05,
+ "loss": 0.9471,
+ "step": 1901
+ },
+ {
+ "epoch": 1.9025350879934981,
+ "grad_norm": 0.5435795187950134,
+ "learning_rate": 5.895320474988864e-05,
+ "loss": 0.94,
+ "step": 1902
+ },
+ {
+ "epoch": 1.903535369322622,
+ "grad_norm": 0.711804211139679,
+ "learning_rate": 5.885764040273426e-05,
+ "loss": 0.9192,
+ "step": 1903
+ },
+ {
+ "epoch": 1.904535650651746,
+ "grad_norm": 0.49941113591194153,
+ "learning_rate": 5.876212126359251e-05,
+ "loss": 0.8541,
+ "step": 1904
+ },
+ {
+ "epoch": 1.9055359319808696,
+ "grad_norm": 0.4437618553638458,
+ "learning_rate": 5.866664743742162e-05,
+ "loss": 0.935,
+ "step": 1905
+ },
+ {
+ "epoch": 1.9065362133099935,
+ "grad_norm": 0.4949079751968384,
+ "learning_rate": 5.857121902913008e-05,
+ "loss": 0.965,
+ "step": 1906
+ },
+ {
+ "epoch": 1.9075364946391171,
+ "grad_norm": 0.5047593712806702,
+ "learning_rate": 5.8475836143576433e-05,
+ "loss": 0.9078,
+ "step": 1907
+ },
+ {
+ "epoch": 1.908536775968241,
+ "grad_norm": 0.4645143449306488,
+ "learning_rate": 5.838049888556925e-05,
+ "loss": 0.7848,
+ "step": 1908
+ },
+ {
+ "epoch": 1.909537057297365,
+ "grad_norm": 0.45980706810951233,
+ "learning_rate": 5.8285207359866936e-05,
+ "loss": 0.8297,
+ "step": 1909
+ },
+ {
+ "epoch": 1.9105373386264888,
+ "grad_norm": 0.566573441028595,
+ "learning_rate": 5.8189961671177574e-05,
+ "loss": 1.0823,
+ "step": 1910
+ },
+ {
+ "epoch": 1.9115376199556127,
+ "grad_norm": 0.46022629737854004,
+ "learning_rate": 5.809476192415905e-05,
+ "loss": 0.8634,
+ "step": 1911
+ },
+ {
+ "epoch": 1.9125379012847363,
+ "grad_norm": 0.493632048368454,
+ "learning_rate": 5.7999608223418534e-05,
+ "loss": 0.7264,
+ "step": 1912
+ },
+ {
+ "epoch": 1.91353818261386,
+ "grad_norm": 0.4561927616596222,
+ "learning_rate": 5.790450067351291e-05,
+ "loss": 0.8736,
+ "step": 1913
+ },
+ {
+ "epoch": 1.9145384639429839,
+ "grad_norm": 0.5217312574386597,
+ "learning_rate": 5.780943937894805e-05,
+ "loss": 0.9918,
+ "step": 1914
+ },
+ {
+ "epoch": 1.9155387452721078,
+ "grad_norm": 0.500164806842804,
+ "learning_rate": 5.771442444417918e-05,
+ "loss": 0.784,
+ "step": 1915
+ },
+ {
+ "epoch": 1.9165390266012317,
+ "grad_norm": 0.4723392724990845,
+ "learning_rate": 5.761945597361054e-05,
+ "loss": 0.9225,
+ "step": 1916
+ },
+ {
+ "epoch": 1.9175393079303555,
+ "grad_norm": 0.4722166359424591,
+ "learning_rate": 5.752453407159522e-05,
+ "loss": 0.8516,
+ "step": 1917
+ },
+ {
+ "epoch": 1.9185395892594792,
+ "grad_norm": 0.4163341522216797,
+ "learning_rate": 5.742965884243532e-05,
+ "loss": 0.7709,
+ "step": 1918
+ },
+ {
+ "epoch": 1.919539870588603,
+ "grad_norm": 0.5236088037490845,
+ "learning_rate": 5.733483039038149e-05,
+ "loss": 0.9662,
+ "step": 1919
+ },
+ {
+ "epoch": 1.9205401519177268,
+ "grad_norm": 0.5264710783958435,
+ "learning_rate": 5.724004881963311e-05,
+ "loss": 0.8918,
+ "step": 1920
+ },
+ {
+ "epoch": 1.9215404332468506,
+ "grad_norm": 0.43993479013442993,
+ "learning_rate": 5.714531423433791e-05,
+ "loss": 0.9233,
+ "step": 1921
+ },
+ {
+ "epoch": 1.9225407145759745,
+ "grad_norm": 0.4552697241306305,
+ "learning_rate": 5.705062673859216e-05,
+ "loss": 0.8593,
+ "step": 1922
+ },
+ {
+ "epoch": 1.9235409959050984,
+ "grad_norm": 0.5186688899993896,
+ "learning_rate": 5.69559864364402e-05,
+ "loss": 0.906,
+ "step": 1923
+ },
+ {
+ "epoch": 1.924541277234222,
+ "grad_norm": 0.5140933990478516,
+ "learning_rate": 5.6861393431874675e-05,
+ "loss": 1.0488,
+ "step": 1924
+ },
+ {
+ "epoch": 1.925541558563346,
+ "grad_norm": 0.4874193072319031,
+ "learning_rate": 5.676684782883615e-05,
+ "loss": 0.8875,
+ "step": 1925
+ },
+ {
+ "epoch": 1.9265418398924696,
+ "grad_norm": 0.5220529437065125,
+ "learning_rate": 5.667234973121317e-05,
+ "loss": 0.8561,
+ "step": 1926
+ },
+ {
+ "epoch": 1.9275421212215935,
+ "grad_norm": 0.43269822001457214,
+ "learning_rate": 5.6577899242842025e-05,
+ "loss": 0.9039,
+ "step": 1927
+ },
+ {
+ "epoch": 1.9285424025507174,
+ "grad_norm": 0.5126697421073914,
+ "learning_rate": 5.648349646750673e-05,
+ "loss": 0.941,
+ "step": 1928
+ },
+ {
+ "epoch": 1.9295426838798413,
+ "grad_norm": 0.5042800307273865,
+ "learning_rate": 5.6389141508938903e-05,
+ "loss": 0.9901,
+ "step": 1929
+ },
+ {
+ "epoch": 1.9305429652089652,
+ "grad_norm": 0.5381462574005127,
+ "learning_rate": 5.629483447081751e-05,
+ "loss": 0.9661,
+ "step": 1930
+ },
+ {
+ "epoch": 1.9315432465380888,
+ "grad_norm": 0.5455595254898071,
+ "learning_rate": 5.620057545676901e-05,
+ "loss": 0.8618,
+ "step": 1931
+ },
+ {
+ "epoch": 1.9325435278672125,
+ "grad_norm": 0.45534226298332214,
+ "learning_rate": 5.610636457036693e-05,
+ "loss": 0.827,
+ "step": 1932
+ },
+ {
+ "epoch": 1.9335438091963364,
+ "grad_norm": 0.4841485917568207,
+ "learning_rate": 5.601220191513208e-05,
+ "loss": 0.8979,
+ "step": 1933
+ },
+ {
+ "epoch": 1.9345440905254603,
+ "grad_norm": 0.4711385667324066,
+ "learning_rate": 5.591808759453214e-05,
+ "loss": 0.9841,
+ "step": 1934
+ },
+ {
+ "epoch": 1.9355443718545842,
+ "grad_norm": 0.44583311676979065,
+ "learning_rate": 5.5824021711981686e-05,
+ "loss": 0.7455,
+ "step": 1935
+ },
+ {
+ "epoch": 1.936544653183708,
+ "grad_norm": 0.5186678171157837,
+ "learning_rate": 5.573000437084221e-05,
+ "loss": 0.8881,
+ "step": 1936
+ },
+ {
+ "epoch": 1.9375449345128317,
+ "grad_norm": 0.5111430287361145,
+ "learning_rate": 5.563603567442168e-05,
+ "loss": 0.97,
+ "step": 1937
+ },
+ {
+ "epoch": 1.9385452158419556,
+ "grad_norm": 0.5185840129852295,
+ "learning_rate": 5.554211572597477e-05,
+ "loss": 0.9864,
+ "step": 1938
+ },
+ {
+ "epoch": 1.9395454971710793,
+ "grad_norm": 0.590919554233551,
+ "learning_rate": 5.544824462870244e-05,
+ "loss": 0.9917,
+ "step": 1939
+ },
+ {
+ "epoch": 1.9405457785002032,
+ "grad_norm": 0.5174764394760132,
+ "learning_rate": 5.5354422485752125e-05,
+ "loss": 0.91,
+ "step": 1940
+ },
+ {
+ "epoch": 1.941546059829327,
+ "grad_norm": 0.4679591953754425,
+ "learning_rate": 5.5260649400217326e-05,
+ "loss": 0.8743,
+ "step": 1941
+ },
+ {
+ "epoch": 1.942546341158451,
+ "grad_norm": 0.5026495456695557,
+ "learning_rate": 5.5166925475137735e-05,
+ "loss": 0.9652,
+ "step": 1942
+ },
+ {
+ "epoch": 1.9435466224875746,
+ "grad_norm": 0.48180490732192993,
+ "learning_rate": 5.507325081349903e-05,
+ "loss": 0.9213,
+ "step": 1943
+ },
+ {
+ "epoch": 1.9445469038166985,
+ "grad_norm": 0.467143714427948,
+ "learning_rate": 5.497962551823266e-05,
+ "loss": 0.901,
+ "step": 1944
+ },
+ {
+ "epoch": 1.9455471851458221,
+ "grad_norm": 0.4535980820655823,
+ "learning_rate": 5.488604969221597e-05,
+ "loss": 0.8284,
+ "step": 1945
+ },
+ {
+ "epoch": 1.946547466474946,
+ "grad_norm": 0.5203812122344971,
+ "learning_rate": 5.479252343827178e-05,
+ "loss": 0.8001,
+ "step": 1946
+ },
+ {
+ "epoch": 1.94754774780407,
+ "grad_norm": 0.4892285168170929,
+ "learning_rate": 5.469904685916861e-05,
+ "loss": 0.7415,
+ "step": 1947
+ },
+ {
+ "epoch": 1.9485480291331938,
+ "grad_norm": 0.5130967497825623,
+ "learning_rate": 5.460562005762024e-05,
+ "loss": 0.9661,
+ "step": 1948
+ },
+ {
+ "epoch": 1.9495483104623177,
+ "grad_norm": 0.47101548314094543,
+ "learning_rate": 5.4512243136285915e-05,
+ "loss": 0.85,
+ "step": 1949
+ },
+ {
+ "epoch": 1.9505485917914414,
+ "grad_norm": 0.4335457384586334,
+ "learning_rate": 5.441891619776987e-05,
+ "loss": 0.8406,
+ "step": 1950
+ },
+ {
+ "epoch": 1.951548873120565,
+ "grad_norm": 0.45771148800849915,
+ "learning_rate": 5.432563934462166e-05,
+ "loss": 0.9252,
+ "step": 1951
+ },
+ {
+ "epoch": 1.952549154449689,
+ "grad_norm": 0.5619480013847351,
+ "learning_rate": 5.423241267933557e-05,
+ "loss": 0.844,
+ "step": 1952
+ },
+ {
+ "epoch": 1.9535494357788128,
+ "grad_norm": 0.4204142391681671,
+ "learning_rate": 5.4139236304350935e-05,
+ "loss": 0.8829,
+ "step": 1953
+ },
+ {
+ "epoch": 1.9545497171079367,
+ "grad_norm": 0.4862264394760132,
+ "learning_rate": 5.404611032205169e-05,
+ "loss": 0.9882,
+ "step": 1954
+ },
+ {
+ "epoch": 1.9555499984370606,
+ "grad_norm": 0.46490079164505005,
+ "learning_rate": 5.3953034834766416e-05,
+ "loss": 0.978,
+ "step": 1955
+ },
+ {
+ "epoch": 1.9565502797661842,
+ "grad_norm": 0.5944529175758362,
+ "learning_rate": 5.386000994476832e-05,
+ "loss": 0.8706,
+ "step": 1956
+ },
+ {
+ "epoch": 1.957550561095308,
+ "grad_norm": 0.5310636162757874,
+ "learning_rate": 5.376703575427481e-05,
+ "loss": 0.9472,
+ "step": 1957
+ },
+ {
+ "epoch": 1.9585508424244318,
+ "grad_norm": 0.49689510464668274,
+ "learning_rate": 5.367411236544786e-05,
+ "loss": 1.0081,
+ "step": 1958
+ },
+ {
+ "epoch": 1.9595511237535557,
+ "grad_norm": 0.5658974647521973,
+ "learning_rate": 5.3581239880393375e-05,
+ "loss": 1.0419,
+ "step": 1959
+ },
+ {
+ "epoch": 1.9605514050826796,
+ "grad_norm": 0.4068913757801056,
+ "learning_rate": 5.3488418401161475e-05,
+ "loss": 0.7635,
+ "step": 1960
+ },
+ {
+ "epoch": 1.9615516864118034,
+ "grad_norm": 0.6318438053131104,
+ "learning_rate": 5.339564802974615e-05,
+ "loss": 0.8508,
+ "step": 1961
+ },
+ {
+ "epoch": 1.962551967740927,
+ "grad_norm": 0.5346115827560425,
+ "learning_rate": 5.33029288680852e-05,
+ "loss": 0.8885,
+ "step": 1962
+ },
+ {
+ "epoch": 1.963552249070051,
+ "grad_norm": 0.4659571051597595,
+ "learning_rate": 5.321026101806032e-05,
+ "loss": 0.8957,
+ "step": 1963
+ },
+ {
+ "epoch": 1.9645525303991747,
+ "grad_norm": 0.502803385257721,
+ "learning_rate": 5.311764458149664e-05,
+ "loss": 0.8807,
+ "step": 1964
+ },
+ {
+ "epoch": 1.9655528117282985,
+ "grad_norm": 0.482771098613739,
+ "learning_rate": 5.302507966016295e-05,
+ "loss": 0.9404,
+ "step": 1965
+ },
+ {
+ "epoch": 1.9665530930574224,
+ "grad_norm": 0.48509371280670166,
+ "learning_rate": 5.293256635577126e-05,
+ "loss": 0.8903,
+ "step": 1966
+ },
+ {
+ "epoch": 1.9675533743865463,
+ "grad_norm": 0.5044885873794556,
+ "learning_rate": 5.284010476997705e-05,
+ "loss": 0.8193,
+ "step": 1967
+ },
+ {
+ "epoch": 1.96855365571567,
+ "grad_norm": 0.5143056511878967,
+ "learning_rate": 5.274769500437882e-05,
+ "loss": 0.9903,
+ "step": 1968
+ },
+ {
+ "epoch": 1.9695539370447939,
+ "grad_norm": 0.4803191125392914,
+ "learning_rate": 5.265533716051825e-05,
+ "loss": 0.815,
+ "step": 1969
+ },
+ {
+ "epoch": 1.9705542183739175,
+ "grad_norm": 0.4977998435497284,
+ "learning_rate": 5.256303133987982e-05,
+ "loss": 0.8749,
+ "step": 1970
+ },
+ {
+ "epoch": 1.9715544997030414,
+ "grad_norm": 0.4953812062740326,
+ "learning_rate": 5.247077764389099e-05,
+ "loss": 0.8228,
+ "step": 1971
+ },
+ {
+ "epoch": 1.9725547810321653,
+ "grad_norm": 0.4795776903629303,
+ "learning_rate": 5.2378576173921934e-05,
+ "loss": 0.8692,
+ "step": 1972
+ },
+ {
+ "epoch": 1.9735550623612892,
+ "grad_norm": 0.6318855881690979,
+ "learning_rate": 5.22864270312853e-05,
+ "loss": 1.0387,
+ "step": 1973
+ },
+ {
+ "epoch": 1.974555343690413,
+ "grad_norm": 0.4658355116844177,
+ "learning_rate": 5.219433031723641e-05,
+ "loss": 0.7585,
+ "step": 1974
+ },
+ {
+ "epoch": 1.9755556250195367,
+ "grad_norm": 0.41826239228248596,
+ "learning_rate": 5.210228613297281e-05,
+ "loss": 0.7991,
+ "step": 1975
+ },
+ {
+ "epoch": 1.9765559063486604,
+ "grad_norm": 0.4662337005138397,
+ "learning_rate": 5.201029457963451e-05,
+ "loss": 0.9127,
+ "step": 1976
+ },
+ {
+ "epoch": 1.9775561876777843,
+ "grad_norm": 0.4976811408996582,
+ "learning_rate": 5.191835575830352e-05,
+ "loss": 1.104,
+ "step": 1977
+ },
+ {
+ "epoch": 1.9785564690069082,
+ "grad_norm": 0.5814425945281982,
+ "learning_rate": 5.1826469770004026e-05,
+ "loss": 0.9479,
+ "step": 1978
+ },
+ {
+ "epoch": 1.979556750336032,
+ "grad_norm": 0.5100698471069336,
+ "learning_rate": 5.1734636715702043e-05,
+ "loss": 0.8778,
+ "step": 1979
+ },
+ {
+ "epoch": 1.980557031665156,
+ "grad_norm": 0.5200473070144653,
+ "learning_rate": 5.1642856696305575e-05,
+ "loss": 0.9684,
+ "step": 1980
+ },
+ {
+ "epoch": 1.9815573129942796,
+ "grad_norm": 0.728875458240509,
+ "learning_rate": 5.155112981266422e-05,
+ "loss": 1.1052,
+ "step": 1981
+ },
+ {
+ "epoch": 1.9825575943234035,
+ "grad_norm": 0.504478931427002,
+ "learning_rate": 5.145945616556921e-05,
+ "loss": 0.9089,
+ "step": 1982
+ },
+ {
+ "epoch": 1.9835578756525272,
+ "grad_norm": 0.48226889967918396,
+ "learning_rate": 5.136783585575336e-05,
+ "loss": 0.8765,
+ "step": 1983
+ },
+ {
+ "epoch": 1.984558156981651,
+ "grad_norm": 0.47124215960502625,
+ "learning_rate": 5.127626898389075e-05,
+ "loss": 0.8909,
+ "step": 1984
+ },
+ {
+ "epoch": 1.985558438310775,
+ "grad_norm": 0.4892251491546631,
+ "learning_rate": 5.118475565059691e-05,
+ "loss": 0.7596,
+ "step": 1985
+ },
+ {
+ "epoch": 1.9865587196398988,
+ "grad_norm": 0.550346851348877,
+ "learning_rate": 5.109329595642829e-05,
+ "loss": 1.0297,
+ "step": 1986
+ },
+ {
+ "epoch": 1.9875590009690225,
+ "grad_norm": 0.5535497069358826,
+ "learning_rate": 5.1001890001882734e-05,
+ "loss": 0.8995,
+ "step": 1987
+ },
+ {
+ "epoch": 1.9885592822981464,
+ "grad_norm": 0.4945215582847595,
+ "learning_rate": 5.091053788739878e-05,
+ "loss": 0.8223,
+ "step": 1988
+ },
+ {
+ "epoch": 1.98955956362727,
+ "grad_norm": 0.46689140796661377,
+ "learning_rate": 5.081923971335582e-05,
+ "loss": 0.7746,
+ "step": 1989
+ },
+ {
+ "epoch": 1.990559844956394,
+ "grad_norm": 0.46213075518608093,
+ "learning_rate": 5.072799558007415e-05,
+ "loss": 0.9981,
+ "step": 1990
+ },
+ {
+ "epoch": 1.9915601262855178,
+ "grad_norm": 0.4265044629573822,
+ "learning_rate": 5.063680558781445e-05,
+ "loss": 0.7414,
+ "step": 1991
+ },
+ {
+ "epoch": 1.9925604076146417,
+ "grad_norm": 0.47252804040908813,
+ "learning_rate": 5.0545669836778144e-05,
+ "loss": 0.9779,
+ "step": 1992
+ },
+ {
+ "epoch": 1.9935606889437656,
+ "grad_norm": 0.49390360713005066,
+ "learning_rate": 5.045458842710684e-05,
+ "loss": 1.047,
+ "step": 1993
+ },
+ {
+ "epoch": 1.9945609702728893,
+ "grad_norm": 0.48533156514167786,
+ "learning_rate": 5.036356145888263e-05,
+ "loss": 0.784,
+ "step": 1994
+ },
+ {
+ "epoch": 1.995561251602013,
+ "grad_norm": 0.4855436086654663,
+ "learning_rate": 5.0272589032127594e-05,
+ "loss": 1.0186,
+ "step": 1995
+ },
+ {
+ "epoch": 1.9965615329311368,
+ "grad_norm": 0.48796966671943665,
+ "learning_rate": 5.0181671246804064e-05,
+ "loss": 0.931,
+ "step": 1996
+ },
+ {
+ "epoch": 1.9975618142602607,
+ "grad_norm": 0.476491242647171,
+ "learning_rate": 5.009080820281415e-05,
+ "loss": 0.7653,
+ "step": 1997
+ },
+ {
+ "epoch": 1.9985620955893846,
+ "grad_norm": 0.48085761070251465,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 0.7846,
+ "step": 1998
+ },
+ {
+ "epoch": 1.9995623769185085,
+ "grad_norm": 0.454314261674881,
+ "learning_rate": 4.990924673814336e-05,
+ "loss": 0.8582,
+ "step": 1999
+ },
+ {
+ "epoch": 2.0005626582476324,
+ "grad_norm": 0.3911774158477783,
+ "learning_rate": 4.981854851696568e-05,
+ "loss": 0.5936,
+ "step": 2000
+ },
+ {
+ "epoch": 2.001562939576756,
+ "grad_norm": 0.44177675247192383,
+ "learning_rate": 4.972790543612783e-05,
+ "loss": 0.7308,
+ "step": 2001
+ },
+ {
+ "epoch": 2.0025632209058797,
+ "grad_norm": 0.43666279315948486,
+ "learning_rate": 4.963731759523022e-05,
+ "loss": 0.7415,
+ "step": 2002
+ },
+ {
+ "epoch": 2.0035635022350036,
+ "grad_norm": 0.4072078466415405,
+ "learning_rate": 4.954678509381253e-05,
+ "loss": 0.5694,
+ "step": 2003
+ },
+ {
+ "epoch": 2.0045637835641275,
+ "grad_norm": 0.4740023612976074,
+ "learning_rate": 4.945630803135354e-05,
+ "loss": 0.608,
+ "step": 2004
+ },
+ {
+ "epoch": 2.0055640648932513,
+ "grad_norm": 0.37969714403152466,
+ "learning_rate": 4.9365886507271243e-05,
+ "loss": 0.5037,
+ "step": 2005
+ },
+ {
+ "epoch": 2.0065643462223752,
+ "grad_norm": 0.4192529618740082,
+ "learning_rate": 4.9275520620922477e-05,
+ "loss": 0.7473,
+ "step": 2006
+ },
+ {
+ "epoch": 2.0075646275514987,
+ "grad_norm": 0.42636537551879883,
+ "learning_rate": 4.918521047160308e-05,
+ "loss": 0.5791,
+ "step": 2007
+ },
+ {
+ "epoch": 2.0085649088806226,
+ "grad_norm": 0.44065889716148376,
+ "learning_rate": 4.9094956158547535e-05,
+ "loss": 0.571,
+ "step": 2008
+ },
+ {
+ "epoch": 2.0095651902097464,
+ "grad_norm": 0.4889744520187378,
+ "learning_rate": 4.900475778092897e-05,
+ "loss": 0.6856,
+ "step": 2009
+ },
+ {
+ "epoch": 2.0105654715388703,
+ "grad_norm": 0.4938597083091736,
+ "learning_rate": 4.891461543785917e-05,
+ "loss": 0.7002,
+ "step": 2010
+ },
+ {
+ "epoch": 2.011565752867994,
+ "grad_norm": 0.45282644033432007,
+ "learning_rate": 4.882452922838818e-05,
+ "loss": 0.6107,
+ "step": 2011
+ },
+ {
+ "epoch": 2.012566034197118,
+ "grad_norm": 0.3883883059024811,
+ "learning_rate": 4.873449925150455e-05,
+ "loss": 0.5482,
+ "step": 2012
+ },
+ {
+ "epoch": 2.0135663155262415,
+ "grad_norm": 0.4271782636642456,
+ "learning_rate": 4.864452560613485e-05,
+ "loss": 0.6037,
+ "step": 2013
+ },
+ {
+ "epoch": 2.0145665968553654,
+ "grad_norm": 0.46755480766296387,
+ "learning_rate": 4.855460839114392e-05,
+ "loss": 0.5501,
+ "step": 2014
+ },
+ {
+ "epoch": 2.0155668781844893,
+ "grad_norm": 0.4328460693359375,
+ "learning_rate": 4.846474770533446e-05,
+ "loss": 0.5598,
+ "step": 2015
+ },
+ {
+ "epoch": 2.016567159513613,
+ "grad_norm": 0.45182377099990845,
+ "learning_rate": 4.837494364744711e-05,
+ "loss": 0.6259,
+ "step": 2016
+ },
+ {
+ "epoch": 2.017567440842737,
+ "grad_norm": 0.44600123167037964,
+ "learning_rate": 4.828519631616037e-05,
+ "loss": 0.5913,
+ "step": 2017
+ },
+ {
+ "epoch": 2.018567722171861,
+ "grad_norm": 0.44893786311149597,
+ "learning_rate": 4.8195505810090246e-05,
+ "loss": 0.5569,
+ "step": 2018
+ },
+ {
+ "epoch": 2.019568003500985,
+ "grad_norm": 0.45915499329566956,
+ "learning_rate": 4.810587222779043e-05,
+ "loss": 0.3957,
+ "step": 2019
+ },
+ {
+ "epoch": 2.0205682848301083,
+ "grad_norm": 0.43786874413490295,
+ "learning_rate": 4.801629566775196e-05,
+ "loss": 0.672,
+ "step": 2020
+ },
+ {
+ "epoch": 2.021568566159232,
+ "grad_norm": 0.47288084030151367,
+ "learning_rate": 4.792677622840336e-05,
+ "loss": 0.664,
+ "step": 2021
+ },
+ {
+ "epoch": 2.022568847488356,
+ "grad_norm": 0.4936700165271759,
+ "learning_rate": 4.783731400811022e-05,
+ "loss": 0.6865,
+ "step": 2022
+ },
+ {
+ "epoch": 2.02356912881748,
+ "grad_norm": 0.4503854215145111,
+ "learning_rate": 4.774790910517541e-05,
+ "loss": 0.5091,
+ "step": 2023
+ },
+ {
+ "epoch": 2.024569410146604,
+ "grad_norm": 0.4443006217479706,
+ "learning_rate": 4.7658561617838684e-05,
+ "loss": 0.5552,
+ "step": 2024
+ },
+ {
+ "epoch": 2.0255696914757277,
+ "grad_norm": 0.4622330069541931,
+ "learning_rate": 4.756927164427685e-05,
+ "loss": 0.6221,
+ "step": 2025
+ },
+ {
+ "epoch": 2.026569972804851,
+ "grad_norm": 0.44454696774482727,
+ "learning_rate": 4.748003928260335e-05,
+ "loss": 0.6339,
+ "step": 2026
+ },
+ {
+ "epoch": 2.027570254133975,
+ "grad_norm": 0.5022785067558289,
+ "learning_rate": 4.73908646308685e-05,
+ "loss": 0.6527,
+ "step": 2027
+ },
+ {
+ "epoch": 2.028570535463099,
+ "grad_norm": 0.4373007118701935,
+ "learning_rate": 4.730174778705908e-05,
+ "loss": 0.6287,
+ "step": 2028
+ },
+ {
+ "epoch": 2.029570816792223,
+ "grad_norm": 0.40241381525993347,
+ "learning_rate": 4.721268884909833e-05,
+ "loss": 0.5972,
+ "step": 2029
+ },
+ {
+ "epoch": 2.0305710981213467,
+ "grad_norm": 0.44283151626586914,
+ "learning_rate": 4.712368791484597e-05,
+ "loss": 0.4885,
+ "step": 2030
+ },
+ {
+ "epoch": 2.0315713794504706,
+ "grad_norm": 0.47259289026260376,
+ "learning_rate": 4.703474508209793e-05,
+ "loss": 0.6326,
+ "step": 2031
+ },
+ {
+ "epoch": 2.032571660779594,
+ "grad_norm": 0.4327373802661896,
+ "learning_rate": 4.694586044858633e-05,
+ "loss": 0.5445,
+ "step": 2032
+ },
+ {
+ "epoch": 2.033571942108718,
+ "grad_norm": 0.42448604106903076,
+ "learning_rate": 4.6857034111979235e-05,
+ "loss": 0.5654,
+ "step": 2033
+ },
+ {
+ "epoch": 2.034572223437842,
+ "grad_norm": 0.450679212808609,
+ "learning_rate": 4.6768266169880804e-05,
+ "loss": 0.6663,
+ "step": 2034
+ },
+ {
+ "epoch": 2.0355725047669657,
+ "grad_norm": 0.4411293864250183,
+ "learning_rate": 4.66795567198309e-05,
+ "loss": 0.6251,
+ "step": 2035
+ },
+ {
+ "epoch": 2.0365727860960896,
+ "grad_norm": 0.4397091269493103,
+ "learning_rate": 4.6590905859305135e-05,
+ "loss": 0.5921,
+ "step": 2036
+ },
+ {
+ "epoch": 2.0375730674252135,
+ "grad_norm": 0.4734553396701813,
+ "learning_rate": 4.650231368571486e-05,
+ "loss": 0.6071,
+ "step": 2037
+ },
+ {
+ "epoch": 2.038573348754337,
+ "grad_norm": 0.4945426285266876,
+ "learning_rate": 4.6413780296406764e-05,
+ "loss": 0.6295,
+ "step": 2038
+ },
+ {
+ "epoch": 2.039573630083461,
+ "grad_norm": 0.47174322605133057,
+ "learning_rate": 4.6325305788663096e-05,
+ "loss": 0.7296,
+ "step": 2039
+ },
+ {
+ "epoch": 2.0405739114125847,
+ "grad_norm": 0.4360683262348175,
+ "learning_rate": 4.623689025970128e-05,
+ "loss": 0.5333,
+ "step": 2040
+ },
+ {
+ "epoch": 2.0415741927417086,
+ "grad_norm": 0.481152206659317,
+ "learning_rate": 4.6148533806674074e-05,
+ "loss": 0.6103,
+ "step": 2041
+ },
+ {
+ "epoch": 2.0425744740708325,
+ "grad_norm": 0.553936243057251,
+ "learning_rate": 4.606023652666915e-05,
+ "loss": 0.6623,
+ "step": 2042
+ },
+ {
+ "epoch": 2.0435747553999564,
+ "grad_norm": 0.4619155526161194,
+ "learning_rate": 4.597199851670932e-05,
+ "loss": 0.7526,
+ "step": 2043
+ },
+ {
+ "epoch": 2.0445750367290803,
+ "grad_norm": 0.4538067877292633,
+ "learning_rate": 4.5883819873752156e-05,
+ "loss": 0.6401,
+ "step": 2044
+ },
+ {
+ "epoch": 2.0455753180582037,
+ "grad_norm": 0.46386808156967163,
+ "learning_rate": 4.5795700694690046e-05,
+ "loss": 0.7433,
+ "step": 2045
+ },
+ {
+ "epoch": 2.0465755993873276,
+ "grad_norm": 0.490567147731781,
+ "learning_rate": 4.5707641076350074e-05,
+ "loss": 0.6046,
+ "step": 2046
+ },
+ {
+ "epoch": 2.0475758807164515,
+ "grad_norm": 0.45558422803878784,
+ "learning_rate": 4.5619641115493774e-05,
+ "loss": 0.6133,
+ "step": 2047
+ },
+ {
+ "epoch": 2.0485761620455754,
+ "grad_norm": 0.49613502621650696,
+ "learning_rate": 4.553170090881724e-05,
+ "loss": 0.7413,
+ "step": 2048
+ },
+ {
+ "epoch": 2.0495764433746992,
+ "grad_norm": 0.6731177568435669,
+ "learning_rate": 4.54438205529508e-05,
+ "loss": 0.5993,
+ "step": 2049
+ },
+ {
+ "epoch": 2.050576724703823,
+ "grad_norm": 0.4620528817176819,
+ "learning_rate": 4.535600014445914e-05,
+ "loss": 0.6223,
+ "step": 2050
+ },
+ {
+ "epoch": 2.0515770060329466,
+ "grad_norm": 0.47041627764701843,
+ "learning_rate": 4.5268239779840935e-05,
+ "loss": 0.6265,
+ "step": 2051
+ },
+ {
+ "epoch": 2.0525772873620705,
+ "grad_norm": 0.4700336754322052,
+ "learning_rate": 4.518053955552903e-05,
+ "loss": 0.7044,
+ "step": 2052
+ },
+ {
+ "epoch": 2.0535775686911943,
+ "grad_norm": 0.4150082468986511,
+ "learning_rate": 4.5092899567890035e-05,
+ "loss": 0.5772,
+ "step": 2053
+ },
+ {
+ "epoch": 2.0545778500203182,
+ "grad_norm": 0.4649240970611572,
+ "learning_rate": 4.5005319913224506e-05,
+ "loss": 0.5031,
+ "step": 2054
+ },
+ {
+ "epoch": 2.055578131349442,
+ "grad_norm": 0.5402297973632812,
+ "learning_rate": 4.491780068776663e-05,
+ "loss": 0.6701,
+ "step": 2055
+ },
+ {
+ "epoch": 2.056578412678566,
+ "grad_norm": 0.49985572695732117,
+ "learning_rate": 4.4830341987684166e-05,
+ "loss": 0.5614,
+ "step": 2056
+ },
+ {
+ "epoch": 2.0575786940076894,
+ "grad_norm": 0.49652746319770813,
+ "learning_rate": 4.474294390907847e-05,
+ "loss": 0.593,
+ "step": 2057
+ },
+ {
+ "epoch": 2.0585789753368133,
+ "grad_norm": 0.4196471571922302,
+ "learning_rate": 4.465560654798417e-05,
+ "loss": 0.6161,
+ "step": 2058
+ },
+ {
+ "epoch": 2.059579256665937,
+ "grad_norm": 0.47680115699768066,
+ "learning_rate": 4.4568330000369286e-05,
+ "loss": 0.5485,
+ "step": 2059
+ },
+ {
+ "epoch": 2.060579537995061,
+ "grad_norm": 0.4866887032985687,
+ "learning_rate": 4.448111436213486e-05,
+ "loss": 0.6281,
+ "step": 2060
+ },
+ {
+ "epoch": 2.061579819324185,
+ "grad_norm": 0.5039479732513428,
+ "learning_rate": 4.4393959729115244e-05,
+ "loss": 0.6517,
+ "step": 2061
+ },
+ {
+ "epoch": 2.062580100653309,
+ "grad_norm": 0.4648885428905487,
+ "learning_rate": 4.4306866197077544e-05,
+ "loss": 0.58,
+ "step": 2062
+ },
+ {
+ "epoch": 2.0635803819824328,
+ "grad_norm": 0.5204921960830688,
+ "learning_rate": 4.421983386172178e-05,
+ "loss": 0.6859,
+ "step": 2063
+ },
+ {
+ "epoch": 2.064580663311556,
+ "grad_norm": 0.4675167202949524,
+ "learning_rate": 4.413286281868081e-05,
+ "loss": 0.6169,
+ "step": 2064
+ },
+ {
+ "epoch": 2.06558094464068,
+ "grad_norm": 0.5305991172790527,
+ "learning_rate": 4.404595316352002e-05,
+ "loss": 0.691,
+ "step": 2065
+ },
+ {
+ "epoch": 2.066581225969804,
+ "grad_norm": 0.4630433917045593,
+ "learning_rate": 4.3959104991737455e-05,
+ "loss": 0.6128,
+ "step": 2066
+ },
+ {
+ "epoch": 2.067581507298928,
+ "grad_norm": 0.44131141901016235,
+ "learning_rate": 4.387231839876349e-05,
+ "loss": 0.6266,
+ "step": 2067
+ },
+ {
+ "epoch": 2.0685817886280518,
+ "grad_norm": 0.5094907879829407,
+ "learning_rate": 4.3785593479960964e-05,
+ "loss": 0.7263,
+ "step": 2068
+ },
+ {
+ "epoch": 2.0695820699571756,
+ "grad_norm": 0.42740294337272644,
+ "learning_rate": 4.369893033062481e-05,
+ "loss": 0.543,
+ "step": 2069
+ },
+ {
+ "epoch": 2.070582351286299,
+ "grad_norm": 0.5060046911239624,
+ "learning_rate": 4.3612329045982236e-05,
+ "loss": 0.6893,
+ "step": 2070
+ },
+ {
+ "epoch": 2.071582632615423,
+ "grad_norm": 0.42943909764289856,
+ "learning_rate": 4.35257897211923e-05,
+ "loss": 0.6992,
+ "step": 2071
+ },
+ {
+ "epoch": 2.072582913944547,
+ "grad_norm": 0.48537638783454895,
+ "learning_rate": 4.343931245134616e-05,
+ "loss": 0.6904,
+ "step": 2072
+ },
+ {
+ "epoch": 2.0735831952736707,
+ "grad_norm": 0.4671311676502228,
+ "learning_rate": 4.335289733146665e-05,
+ "loss": 0.6142,
+ "step": 2073
+ },
+ {
+ "epoch": 2.0745834766027946,
+ "grad_norm": 0.4160546362400055,
+ "learning_rate": 4.326654445650833e-05,
+ "loss": 0.6052,
+ "step": 2074
+ },
+ {
+ "epoch": 2.0755837579319185,
+ "grad_norm": 0.4677714705467224,
+ "learning_rate": 4.3180253921357414e-05,
+ "loss": 0.6142,
+ "step": 2075
+ },
+ {
+ "epoch": 2.076584039261042,
+ "grad_norm": 0.4365472197532654,
+ "learning_rate": 4.309402582083161e-05,
+ "loss": 0.6131,
+ "step": 2076
+ },
+ {
+ "epoch": 2.077584320590166,
+ "grad_norm": 0.4473261833190918,
+ "learning_rate": 4.300786024968003e-05,
+ "loss": 0.5813,
+ "step": 2077
+ },
+ {
+ "epoch": 2.0785846019192897,
+ "grad_norm": 0.5056237578392029,
+ "learning_rate": 4.2921757302583e-05,
+ "loss": 0.5913,
+ "step": 2078
+ },
+ {
+ "epoch": 2.0795848832484136,
+ "grad_norm": 0.5617183446884155,
+ "learning_rate": 4.283571707415214e-05,
+ "loss": 0.7603,
+ "step": 2079
+ },
+ {
+ "epoch": 2.0805851645775375,
+ "grad_norm": 0.48133864998817444,
+ "learning_rate": 4.274973965893003e-05,
+ "loss": 0.5045,
+ "step": 2080
+ },
+ {
+ "epoch": 2.0815854459066614,
+ "grad_norm": 0.47303125262260437,
+ "learning_rate": 4.266382515139039e-05,
+ "loss": 0.5701,
+ "step": 2081
+ },
+ {
+ "epoch": 2.0825857272357853,
+ "grad_norm": 0.5299637317657471,
+ "learning_rate": 4.2577973645937674e-05,
+ "loss": 0.7264,
+ "step": 2082
+ },
+ {
+ "epoch": 2.0835860085649087,
+ "grad_norm": 0.5403549075126648,
+ "learning_rate": 4.2492185236907125e-05,
+ "loss": 0.6469,
+ "step": 2083
+ },
+ {
+ "epoch": 2.0845862898940326,
+ "grad_norm": 0.4790133237838745,
+ "learning_rate": 4.2406460018564765e-05,
+ "loss": 0.6682,
+ "step": 2084
+ },
+ {
+ "epoch": 2.0855865712231565,
+ "grad_norm": 0.47176721692085266,
+ "learning_rate": 4.2320798085107036e-05,
+ "loss": 0.6211,
+ "step": 2085
+ },
+ {
+ "epoch": 2.0865868525522804,
+ "grad_norm": 0.4947776794433594,
+ "learning_rate": 4.223519953066099e-05,
+ "loss": 0.59,
+ "step": 2086
+ },
+ {
+ "epoch": 2.0875871338814043,
+ "grad_norm": 0.5150135159492493,
+ "learning_rate": 4.214966444928387e-05,
+ "loss": 0.679,
+ "step": 2087
+ },
+ {
+ "epoch": 2.088587415210528,
+ "grad_norm": 0.5139247179031372,
+ "learning_rate": 4.206419293496333e-05,
+ "loss": 0.5977,
+ "step": 2088
+ },
+ {
+ "epoch": 2.0895876965396516,
+ "grad_norm": 0.48402106761932373,
+ "learning_rate": 4.1978785081617057e-05,
+ "loss": 0.7577,
+ "step": 2089
+ },
+ {
+ "epoch": 2.0905879778687755,
+ "grad_norm": 0.44100990891456604,
+ "learning_rate": 4.1893440983092856e-05,
+ "loss": 0.6396,
+ "step": 2090
+ },
+ {
+ "epoch": 2.0915882591978994,
+ "grad_norm": 0.4564374089241028,
+ "learning_rate": 4.18081607331685e-05,
+ "loss": 0.5049,
+ "step": 2091
+ },
+ {
+ "epoch": 2.0925885405270233,
+ "grad_norm": 0.49232858419418335,
+ "learning_rate": 4.172294442555148e-05,
+ "loss": 0.6589,
+ "step": 2092
+ },
+ {
+ "epoch": 2.093588821856147,
+ "grad_norm": 0.4091750383377075,
+ "learning_rate": 4.1637792153879196e-05,
+ "loss": 0.5527,
+ "step": 2093
+ },
+ {
+ "epoch": 2.094589103185271,
+ "grad_norm": 0.4389550983905792,
+ "learning_rate": 4.15527040117185e-05,
+ "loss": 0.6091,
+ "step": 2094
+ },
+ {
+ "epoch": 2.0955893845143945,
+ "grad_norm": 0.4765204191207886,
+ "learning_rate": 4.146768009256595e-05,
+ "loss": 0.7185,
+ "step": 2095
+ },
+ {
+ "epoch": 2.0965896658435184,
+ "grad_norm": 0.5131024718284607,
+ "learning_rate": 4.13827204898474e-05,
+ "loss": 0.6502,
+ "step": 2096
+ },
+ {
+ "epoch": 2.0975899471726422,
+ "grad_norm": 0.5671885013580322,
+ "learning_rate": 4.129782529691815e-05,
+ "loss": 0.577,
+ "step": 2097
+ },
+ {
+ "epoch": 2.098590228501766,
+ "grad_norm": 0.4500812590122223,
+ "learning_rate": 4.1212994607062594e-05,
+ "loss": 0.6345,
+ "step": 2098
+ },
+ {
+ "epoch": 2.09959050983089,
+ "grad_norm": 0.4754406213760376,
+ "learning_rate": 4.1128228513494385e-05,
+ "loss": 0.6497,
+ "step": 2099
+ },
+ {
+ "epoch": 2.100590791160014,
+ "grad_norm": 0.4294159412384033,
+ "learning_rate": 4.1043527109356095e-05,
+ "loss": 0.6813,
+ "step": 2100
+ },
+ {
+ "epoch": 2.1015910724891373,
+ "grad_norm": 0.7561903595924377,
+ "learning_rate": 4.095889048771922e-05,
+ "loss": 0.8037,
+ "step": 2101
+ },
+ {
+ "epoch": 2.1025913538182612,
+ "grad_norm": 0.48615512251853943,
+ "learning_rate": 4.087431874158416e-05,
+ "loss": 0.6563,
+ "step": 2102
+ },
+ {
+ "epoch": 2.103591635147385,
+ "grad_norm": 0.46207091212272644,
+ "learning_rate": 4.0789811963879906e-05,
+ "loss": 0.6163,
+ "step": 2103
+ },
+ {
+ "epoch": 2.104591916476509,
+ "grad_norm": 0.49406757950782776,
+ "learning_rate": 4.070537024746416e-05,
+ "loss": 0.5831,
+ "step": 2104
+ },
+ {
+ "epoch": 2.105592197805633,
+ "grad_norm": 0.5127863883972168,
+ "learning_rate": 4.06209936851231e-05,
+ "loss": 0.5506,
+ "step": 2105
+ },
+ {
+ "epoch": 2.106592479134757,
+ "grad_norm": 0.41014209389686584,
+ "learning_rate": 4.053668236957134e-05,
+ "loss": 0.5692,
+ "step": 2106
+ },
+ {
+ "epoch": 2.1075927604638807,
+ "grad_norm": 0.5290461182594299,
+ "learning_rate": 4.0452436393451735e-05,
+ "loss": 0.571,
+ "step": 2107
+ },
+ {
+ "epoch": 2.108593041793004,
+ "grad_norm": 0.45752203464508057,
+ "learning_rate": 4.036825584933533e-05,
+ "loss": 0.709,
+ "step": 2108
+ },
+ {
+ "epoch": 2.109593323122128,
+ "grad_norm": 0.5168712139129639,
+ "learning_rate": 4.028414082972141e-05,
+ "loss": 0.7713,
+ "step": 2109
+ },
+ {
+ "epoch": 2.110593604451252,
+ "grad_norm": 0.6233658194541931,
+ "learning_rate": 4.020009142703708e-05,
+ "loss": 0.5952,
+ "step": 2110
+ },
+ {
+ "epoch": 2.1115938857803758,
+ "grad_norm": 0.5407616496086121,
+ "learning_rate": 4.011610773363751e-05,
+ "loss": 0.7355,
+ "step": 2111
+ },
+ {
+ "epoch": 2.1125941671094997,
+ "grad_norm": 0.6055451035499573,
+ "learning_rate": 4.003218984180552e-05,
+ "loss": 0.6509,
+ "step": 2112
+ },
+ {
+ "epoch": 2.1135944484386235,
+ "grad_norm": 0.49832651019096375,
+ "learning_rate": 3.994833784375177e-05,
+ "loss": 0.6325,
+ "step": 2113
+ },
+ {
+ "epoch": 2.114594729767747,
+ "grad_norm": 0.5014695525169373,
+ "learning_rate": 3.986455183161437e-05,
+ "loss": 0.6134,
+ "step": 2114
+ },
+ {
+ "epoch": 2.115595011096871,
+ "grad_norm": 0.45379167795181274,
+ "learning_rate": 3.978083189745907e-05,
+ "loss": 0.5517,
+ "step": 2115
+ },
+ {
+ "epoch": 2.1165952924259948,
+ "grad_norm": 0.5124073028564453,
+ "learning_rate": 3.9697178133278855e-05,
+ "loss": 0.7396,
+ "step": 2116
+ },
+ {
+ "epoch": 2.1175955737551186,
+ "grad_norm": 0.4667278230190277,
+ "learning_rate": 3.961359063099416e-05,
+ "loss": 0.578,
+ "step": 2117
+ },
+ {
+ "epoch": 2.1185958550842425,
+ "grad_norm": 0.5495364665985107,
+ "learning_rate": 3.953006948245247e-05,
+ "loss": 0.4708,
+ "step": 2118
+ },
+ {
+ "epoch": 2.1195961364133664,
+ "grad_norm": 0.45876625180244446,
+ "learning_rate": 3.944661477942844e-05,
+ "loss": 0.5283,
+ "step": 2119
+ },
+ {
+ "epoch": 2.1205964177424903,
+ "grad_norm": 0.47809210419654846,
+ "learning_rate": 3.9363226613623736e-05,
+ "loss": 0.4733,
+ "step": 2120
+ },
+ {
+ "epoch": 2.1215966990716137,
+ "grad_norm": 0.47257041931152344,
+ "learning_rate": 3.9279905076666826e-05,
+ "loss": 0.6244,
+ "step": 2121
+ },
+ {
+ "epoch": 2.1225969804007376,
+ "grad_norm": 0.5050140023231506,
+ "learning_rate": 3.9196650260113044e-05,
+ "loss": 0.6397,
+ "step": 2122
+ },
+ {
+ "epoch": 2.1235972617298615,
+ "grad_norm": 0.5299871563911438,
+ "learning_rate": 3.9113462255444334e-05,
+ "loss": 0.6117,
+ "step": 2123
+ },
+ {
+ "epoch": 2.1245975430589854,
+ "grad_norm": 0.4946582019329071,
+ "learning_rate": 3.903034115406931e-05,
+ "loss": 0.5121,
+ "step": 2124
+ },
+ {
+ "epoch": 2.1255978243881093,
+ "grad_norm": 0.4589192867279053,
+ "learning_rate": 3.8947287047323e-05,
+ "loss": 0.4481,
+ "step": 2125
+ },
+ {
+ "epoch": 2.126598105717233,
+ "grad_norm": 0.5035550594329834,
+ "learning_rate": 3.886430002646688e-05,
+ "loss": 0.684,
+ "step": 2126
+ },
+ {
+ "epoch": 2.1275983870463566,
+ "grad_norm": 0.5557273030281067,
+ "learning_rate": 3.878138018268866e-05,
+ "loss": 0.6545,
+ "step": 2127
+ },
+ {
+ "epoch": 2.1285986683754805,
+ "grad_norm": 0.4621843099594116,
+ "learning_rate": 3.869852760710222e-05,
+ "loss": 0.8157,
+ "step": 2128
+ },
+ {
+ "epoch": 2.1295989497046044,
+ "grad_norm": 0.4417930245399475,
+ "learning_rate": 3.861574239074762e-05,
+ "loss": 0.5235,
+ "step": 2129
+ },
+ {
+ "epoch": 2.1305992310337283,
+ "grad_norm": 0.556983470916748,
+ "learning_rate": 3.8533024624590776e-05,
+ "loss": 0.7682,
+ "step": 2130
+ },
+ {
+ "epoch": 2.131599512362852,
+ "grad_norm": 0.5013543963432312,
+ "learning_rate": 3.845037439952362e-05,
+ "loss": 0.6058,
+ "step": 2131
+ },
+ {
+ "epoch": 2.132599793691976,
+ "grad_norm": 0.43936899304389954,
+ "learning_rate": 3.836779180636373e-05,
+ "loss": 0.5379,
+ "step": 2132
+ },
+ {
+ "epoch": 2.1336000750210995,
+ "grad_norm": 0.4661477208137512,
+ "learning_rate": 3.828527693585451e-05,
+ "loss": 0.5905,
+ "step": 2133
+ },
+ {
+ "epoch": 2.1346003563502234,
+ "grad_norm": 0.6327193379402161,
+ "learning_rate": 3.8202829878664816e-05,
+ "loss": 0.5805,
+ "step": 2134
+ },
+ {
+ "epoch": 2.1356006376793473,
+ "grad_norm": 0.4922885298728943,
+ "learning_rate": 3.812045072538909e-05,
+ "loss": 0.6583,
+ "step": 2135
+ },
+ {
+ "epoch": 2.136600919008471,
+ "grad_norm": 0.44325774908065796,
+ "learning_rate": 3.8038139566547146e-05,
+ "loss": 0.5686,
+ "step": 2136
+ },
+ {
+ "epoch": 2.137601200337595,
+ "grad_norm": 0.5307816863059998,
+ "learning_rate": 3.7955896492584e-05,
+ "loss": 0.6264,
+ "step": 2137
+ },
+ {
+ "epoch": 2.138601481666719,
+ "grad_norm": 0.4622756242752075,
+ "learning_rate": 3.787372159386999e-05,
+ "loss": 0.6792,
+ "step": 2138
+ },
+ {
+ "epoch": 2.1396017629958424,
+ "grad_norm": 0.5342557430267334,
+ "learning_rate": 3.7791614960700395e-05,
+ "loss": 0.5755,
+ "step": 2139
+ },
+ {
+ "epoch": 2.1406020443249663,
+ "grad_norm": 0.5566471815109253,
+ "learning_rate": 3.770957668329562e-05,
+ "loss": 0.7433,
+ "step": 2140
+ },
+ {
+ "epoch": 2.14160232565409,
+ "grad_norm": 0.47061699628829956,
+ "learning_rate": 3.7627606851800837e-05,
+ "loss": 0.4779,
+ "step": 2141
+ },
+ {
+ "epoch": 2.142602606983214,
+ "grad_norm": 0.42781785130500793,
+ "learning_rate": 3.7545705556286126e-05,
+ "loss": 0.6522,
+ "step": 2142
+ },
+ {
+ "epoch": 2.143602888312338,
+ "grad_norm": 0.5037875771522522,
+ "learning_rate": 3.746387288674613e-05,
+ "loss": 0.62,
+ "step": 2143
+ },
+ {
+ "epoch": 2.144603169641462,
+ "grad_norm": 0.5067894458770752,
+ "learning_rate": 3.7382108933100234e-05,
+ "loss": 0.7461,
+ "step": 2144
+ },
+ {
+ "epoch": 2.1456034509705857,
+ "grad_norm": 0.5479350090026855,
+ "learning_rate": 3.730041378519216e-05,
+ "loss": 0.7418,
+ "step": 2145
+ },
+ {
+ "epoch": 2.146603732299709,
+ "grad_norm": 0.4507127106189728,
+ "learning_rate": 3.721878753279017e-05,
+ "loss": 0.6838,
+ "step": 2146
+ },
+ {
+ "epoch": 2.147604013628833,
+ "grad_norm": 0.9193136096000671,
+ "learning_rate": 3.713723026558671e-05,
+ "loss": 0.5877,
+ "step": 2147
+ },
+ {
+ "epoch": 2.148604294957957,
+ "grad_norm": 0.43999728560447693,
+ "learning_rate": 3.705574207319844e-05,
+ "loss": 0.6485,
+ "step": 2148
+ },
+ {
+ "epoch": 2.149604576287081,
+ "grad_norm": 0.5130500197410583,
+ "learning_rate": 3.697432304516618e-05,
+ "loss": 0.7039,
+ "step": 2149
+ },
+ {
+ "epoch": 2.1506048576162047,
+ "grad_norm": 0.5071646571159363,
+ "learning_rate": 3.689297327095472e-05,
+ "loss": 0.602,
+ "step": 2150
+ },
+ {
+ "epoch": 2.1516051389453286,
+ "grad_norm": 0.47906339168548584,
+ "learning_rate": 3.681169283995279e-05,
+ "loss": 0.7002,
+ "step": 2151
+ },
+ {
+ "epoch": 2.152605420274452,
+ "grad_norm": 0.46951034665107727,
+ "learning_rate": 3.673048184147281e-05,
+ "loss": 0.6469,
+ "step": 2152
+ },
+ {
+ "epoch": 2.153605701603576,
+ "grad_norm": 0.4949340522289276,
+ "learning_rate": 3.664934036475104e-05,
+ "loss": 0.6304,
+ "step": 2153
+ },
+ {
+ "epoch": 2.1546059829327,
+ "grad_norm": 0.44482266902923584,
+ "learning_rate": 3.656826849894726e-05,
+ "loss": 0.4853,
+ "step": 2154
+ },
+ {
+ "epoch": 2.1556062642618237,
+ "grad_norm": 0.5063248872756958,
+ "learning_rate": 3.648726633314475e-05,
+ "loss": 0.6082,
+ "step": 2155
+ },
+ {
+ "epoch": 2.1566065455909476,
+ "grad_norm": 0.5235609412193298,
+ "learning_rate": 3.640633395635032e-05,
+ "loss": 0.6015,
+ "step": 2156
+ },
+ {
+ "epoch": 2.1576068269200714,
+ "grad_norm": 0.4473001956939697,
+ "learning_rate": 3.632547145749395e-05,
+ "loss": 0.5672,
+ "step": 2157
+ },
+ {
+ "epoch": 2.158607108249195,
+ "grad_norm": 0.4669405519962311,
+ "learning_rate": 3.624467892542895e-05,
+ "loss": 0.5406,
+ "step": 2158
+ },
+ {
+ "epoch": 2.1596073895783188,
+ "grad_norm": 0.4765620827674866,
+ "learning_rate": 3.616395644893166e-05,
+ "loss": 0.6277,
+ "step": 2159
+ },
+ {
+ "epoch": 2.1606076709074427,
+ "grad_norm": 0.47696128487586975,
+ "learning_rate": 3.6083304116701535e-05,
+ "loss": 0.5853,
+ "step": 2160
+ },
+ {
+ "epoch": 2.1616079522365665,
+ "grad_norm": 0.504258394241333,
+ "learning_rate": 3.600272201736082e-05,
+ "loss": 0.6468,
+ "step": 2161
+ },
+ {
+ "epoch": 2.1626082335656904,
+ "grad_norm": 0.5608981251716614,
+ "learning_rate": 3.5922210239454764e-05,
+ "loss": 0.6373,
+ "step": 2162
+ },
+ {
+ "epoch": 2.1636085148948143,
+ "grad_norm": 0.46563276648521423,
+ "learning_rate": 3.5841768871451185e-05,
+ "loss": 0.6602,
+ "step": 2163
+ },
+ {
+ "epoch": 2.1646087962239378,
+ "grad_norm": 0.44680067896842957,
+ "learning_rate": 3.57613980017406e-05,
+ "loss": 0.6879,
+ "step": 2164
+ },
+ {
+ "epoch": 2.1656090775530616,
+ "grad_norm": 0.5114299058914185,
+ "learning_rate": 3.568109771863613e-05,
+ "loss": 0.6655,
+ "step": 2165
+ },
+ {
+ "epoch": 2.1666093588821855,
+ "grad_norm": 0.4544784426689148,
+ "learning_rate": 3.560086811037316e-05,
+ "loss": 0.6687,
+ "step": 2166
+ },
+ {
+ "epoch": 2.1676096402113094,
+ "grad_norm": 0.4559856057167053,
+ "learning_rate": 3.552070926510962e-05,
+ "loss": 0.5433,
+ "step": 2167
+ },
+ {
+ "epoch": 2.1686099215404333,
+ "grad_norm": 0.506377100944519,
+ "learning_rate": 3.54406212709255e-05,
+ "loss": 0.7024,
+ "step": 2168
+ },
+ {
+ "epoch": 2.169610202869557,
+ "grad_norm": 0.5076850056648254,
+ "learning_rate": 3.536060421582309e-05,
+ "loss": 0.6704,
+ "step": 2169
+ },
+ {
+ "epoch": 2.170610484198681,
+ "grad_norm": 0.4937109351158142,
+ "learning_rate": 3.52806581877266e-05,
+ "loss": 0.6859,
+ "step": 2170
+ },
+ {
+ "epoch": 2.1716107655278045,
+ "grad_norm": 0.49975091218948364,
+ "learning_rate": 3.520078327448232e-05,
+ "loss": 0.5282,
+ "step": 2171
+ },
+ {
+ "epoch": 2.1726110468569284,
+ "grad_norm": 0.5231044888496399,
+ "learning_rate": 3.5120979563858266e-05,
+ "loss": 0.5605,
+ "step": 2172
+ },
+ {
+ "epoch": 2.1736113281860523,
+ "grad_norm": 0.46311333775520325,
+ "learning_rate": 3.5041247143544364e-05,
+ "loss": 0.6421,
+ "step": 2173
+ },
+ {
+ "epoch": 2.174611609515176,
+ "grad_norm": 0.5018386840820312,
+ "learning_rate": 3.496158610115207e-05,
+ "loss": 0.5633,
+ "step": 2174
+ },
+ {
+ "epoch": 2.1756118908443,
+ "grad_norm": 0.42992815375328064,
+ "learning_rate": 3.4881996524214445e-05,
+ "loss": 0.5782,
+ "step": 2175
+ },
+ {
+ "epoch": 2.176612172173424,
+ "grad_norm": 0.4959898889064789,
+ "learning_rate": 3.48024785001861e-05,
+ "loss": 0.6792,
+ "step": 2176
+ },
+ {
+ "epoch": 2.1776124535025474,
+ "grad_norm": 0.5085489749908447,
+ "learning_rate": 3.472303211644289e-05,
+ "loss": 0.6612,
+ "step": 2177
+ },
+ {
+ "epoch": 2.1786127348316713,
+ "grad_norm": 0.4328081011772156,
+ "learning_rate": 3.464365746028208e-05,
+ "loss": 0.6251,
+ "step": 2178
+ },
+ {
+ "epoch": 2.179613016160795,
+ "grad_norm": 0.4798353314399719,
+ "learning_rate": 3.456435461892203e-05,
+ "loss": 0.5382,
+ "step": 2179
+ },
+ {
+ "epoch": 2.180613297489919,
+ "grad_norm": 0.4488179087638855,
+ "learning_rate": 3.4485123679502274e-05,
+ "loss": 0.6123,
+ "step": 2180
+ },
+ {
+ "epoch": 2.181613578819043,
+ "grad_norm": 0.44371160864830017,
+ "learning_rate": 3.4405964729083254e-05,
+ "loss": 0.6717,
+ "step": 2181
+ },
+ {
+ "epoch": 2.182613860148167,
+ "grad_norm": 0.43803316354751587,
+ "learning_rate": 3.43268778546463e-05,
+ "loss": 0.5674,
+ "step": 2182
+ },
+ {
+ "epoch": 2.1836141414772907,
+ "grad_norm": 0.44481751322746277,
+ "learning_rate": 3.424786314309365e-05,
+ "loss": 0.5787,
+ "step": 2183
+ },
+ {
+ "epoch": 2.184614422806414,
+ "grad_norm": 0.5348169803619385,
+ "learning_rate": 3.416892068124812e-05,
+ "loss": 0.7258,
+ "step": 2184
+ },
+ {
+ "epoch": 2.185614704135538,
+ "grad_norm": 0.4896971583366394,
+ "learning_rate": 3.409005055585327e-05,
+ "loss": 0.5921,
+ "step": 2185
+ },
+ {
+ "epoch": 2.186614985464662,
+ "grad_norm": 0.5136271119117737,
+ "learning_rate": 3.401125285357302e-05,
+ "loss": 0.5936,
+ "step": 2186
+ },
+ {
+ "epoch": 2.187615266793786,
+ "grad_norm": 0.45636460185050964,
+ "learning_rate": 3.393252766099187e-05,
+ "loss": 0.6523,
+ "step": 2187
+ },
+ {
+ "epoch": 2.1886155481229097,
+ "grad_norm": 0.7612220644950867,
+ "learning_rate": 3.3853875064614515e-05,
+ "loss": 0.6971,
+ "step": 2188
+ },
+ {
+ "epoch": 2.1896158294520336,
+ "grad_norm": 0.5007143616676331,
+ "learning_rate": 3.377529515086598e-05,
+ "loss": 0.5335,
+ "step": 2189
+ },
+ {
+ "epoch": 2.190616110781157,
+ "grad_norm": 0.47488054633140564,
+ "learning_rate": 3.369678800609134e-05,
+ "loss": 0.6134,
+ "step": 2190
+ },
+ {
+ "epoch": 2.191616392110281,
+ "grad_norm": 0.4808323383331299,
+ "learning_rate": 3.361835371655578e-05,
+ "loss": 0.6084,
+ "step": 2191
+ },
+ {
+ "epoch": 2.192616673439405,
+ "grad_norm": 0.4287136495113373,
+ "learning_rate": 3.353999236844436e-05,
+ "loss": 0.5938,
+ "step": 2192
+ },
+ {
+ "epoch": 2.1936169547685287,
+ "grad_norm": 0.48613372445106506,
+ "learning_rate": 3.3461704047862054e-05,
+ "loss": 0.624,
+ "step": 2193
+ },
+ {
+ "epoch": 2.1946172360976526,
+ "grad_norm": 0.5133928060531616,
+ "learning_rate": 3.33834888408336e-05,
+ "loss": 0.5899,
+ "step": 2194
+ },
+ {
+ "epoch": 2.1956175174267765,
+ "grad_norm": 0.5271064043045044,
+ "learning_rate": 3.3305346833303296e-05,
+ "loss": 0.7295,
+ "step": 2195
+ },
+ {
+ "epoch": 2.1966177987559,
+ "grad_norm": 0.5942690968513489,
+ "learning_rate": 3.322727811113516e-05,
+ "loss": 0.7228,
+ "step": 2196
+ },
+ {
+ "epoch": 2.197618080085024,
+ "grad_norm": 0.47183600068092346,
+ "learning_rate": 3.314928276011251e-05,
+ "loss": 0.6717,
+ "step": 2197
+ },
+ {
+ "epoch": 2.1986183614141477,
+ "grad_norm": 0.4545646011829376,
+ "learning_rate": 3.307136086593821e-05,
+ "loss": 0.5611,
+ "step": 2198
+ },
+ {
+ "epoch": 2.1996186427432716,
+ "grad_norm": 0.4944184422492981,
+ "learning_rate": 3.299351251423426e-05,
+ "loss": 0.551,
+ "step": 2199
+ },
+ {
+ "epoch": 2.2006189240723955,
+ "grad_norm": 0.4972105026245117,
+ "learning_rate": 3.291573779054199e-05,
+ "loss": 0.7719,
+ "step": 2200
+ },
+ {
+ "epoch": 2.2016192054015193,
+ "grad_norm": 0.5225645899772644,
+ "learning_rate": 3.2838036780321715e-05,
+ "loss": 0.7034,
+ "step": 2201
+ },
+ {
+ "epoch": 2.202619486730643,
+ "grad_norm": 0.49897319078445435,
+ "learning_rate": 3.2760409568952766e-05,
+ "loss": 0.6892,
+ "step": 2202
+ },
+ {
+ "epoch": 2.2036197680597667,
+ "grad_norm": 0.4999954402446747,
+ "learning_rate": 3.268285624173347e-05,
+ "loss": 0.6754,
+ "step": 2203
+ },
+ {
+ "epoch": 2.2046200493888906,
+ "grad_norm": 0.4323941469192505,
+ "learning_rate": 3.260537688388086e-05,
+ "loss": 0.5694,
+ "step": 2204
+ },
+ {
+ "epoch": 2.2056203307180144,
+ "grad_norm": 0.5175321102142334,
+ "learning_rate": 3.252797158053077e-05,
+ "loss": 0.6986,
+ "step": 2205
+ },
+ {
+ "epoch": 2.2066206120471383,
+ "grad_norm": 0.5313690900802612,
+ "learning_rate": 3.24506404167376e-05,
+ "loss": 0.6527,
+ "step": 2206
+ },
+ {
+ "epoch": 2.207620893376262,
+ "grad_norm": 0.5002806186676025,
+ "learning_rate": 3.2373383477474354e-05,
+ "loss": 0.5748,
+ "step": 2207
+ },
+ {
+ "epoch": 2.208621174705386,
+ "grad_norm": 0.5108035802841187,
+ "learning_rate": 3.229620084763237e-05,
+ "loss": 0.6255,
+ "step": 2208
+ },
+ {
+ "epoch": 2.2096214560345095,
+ "grad_norm": 0.5970383882522583,
+ "learning_rate": 3.221909261202146e-05,
+ "loss": 0.7238,
+ "step": 2209
+ },
+ {
+ "epoch": 2.2106217373636334,
+ "grad_norm": 0.5481739044189453,
+ "learning_rate": 3.214205885536965e-05,
+ "loss": 0.6904,
+ "step": 2210
+ },
+ {
+ "epoch": 2.2116220186927573,
+ "grad_norm": 0.4750816226005554,
+ "learning_rate": 3.2065099662323017e-05,
+ "loss": 0.5531,
+ "step": 2211
+ },
+ {
+ "epoch": 2.212622300021881,
+ "grad_norm": 0.4694627523422241,
+ "learning_rate": 3.1988215117445896e-05,
+ "loss": 0.503,
+ "step": 2212
+ },
+ {
+ "epoch": 2.213622581351005,
+ "grad_norm": 0.5215654373168945,
+ "learning_rate": 3.191140530522041e-05,
+ "loss": 0.5861,
+ "step": 2213
+ },
+ {
+ "epoch": 2.214622862680129,
+ "grad_norm": 0.49040964245796204,
+ "learning_rate": 3.1834670310046734e-05,
+ "loss": 0.6475,
+ "step": 2214
+ },
+ {
+ "epoch": 2.2156231440092524,
+ "grad_norm": 0.4749949276447296,
+ "learning_rate": 3.1758010216242664e-05,
+ "loss": 0.5104,
+ "step": 2215
+ },
+ {
+ "epoch": 2.2166234253383763,
+ "grad_norm": 0.431478887796402,
+ "learning_rate": 3.168142510804386e-05,
+ "loss": 0.6221,
+ "step": 2216
+ },
+ {
+ "epoch": 2.2176237066675,
+ "grad_norm": 0.5049036741256714,
+ "learning_rate": 3.1604915069603436e-05,
+ "loss": 0.7063,
+ "step": 2217
+ },
+ {
+ "epoch": 2.218623987996624,
+ "grad_norm": 0.5182607173919678,
+ "learning_rate": 3.152848018499215e-05,
+ "loss": 0.5814,
+ "step": 2218
+ },
+ {
+ "epoch": 2.219624269325748,
+ "grad_norm": 0.4258774518966675,
+ "learning_rate": 3.145212053819806e-05,
+ "loss": 0.5629,
+ "step": 2219
+ },
+ {
+ "epoch": 2.220624550654872,
+ "grad_norm": 0.48201316595077515,
+ "learning_rate": 3.137583621312665e-05,
+ "loss": 0.7363,
+ "step": 2220
+ },
+ {
+ "epoch": 2.2216248319839953,
+ "grad_norm": 0.45533907413482666,
+ "learning_rate": 3.1299627293600595e-05,
+ "loss": 0.6155,
+ "step": 2221
+ },
+ {
+ "epoch": 2.222625113313119,
+ "grad_norm": 0.8064365983009338,
+ "learning_rate": 3.122349386335964e-05,
+ "loss": 0.6509,
+ "step": 2222
+ },
+ {
+ "epoch": 2.223625394642243,
+ "grad_norm": 0.4432089924812317,
+ "learning_rate": 3.114743600606078e-05,
+ "loss": 0.5431,
+ "step": 2223
+ },
+ {
+ "epoch": 2.224625675971367,
+ "grad_norm": 0.47190824151039124,
+ "learning_rate": 3.107145380527776e-05,
+ "loss": 0.6119,
+ "step": 2224
+ },
+ {
+ "epoch": 2.225625957300491,
+ "grad_norm": 0.4532092213630676,
+ "learning_rate": 3.099554734450133e-05,
+ "loss": 0.538,
+ "step": 2225
+ },
+ {
+ "epoch": 2.2266262386296147,
+ "grad_norm": 0.4889605641365051,
+ "learning_rate": 3.091971670713889e-05,
+ "loss": 0.6446,
+ "step": 2226
+ },
+ {
+ "epoch": 2.227626519958738,
+ "grad_norm": 0.5159114003181458,
+ "learning_rate": 3.084396197651468e-05,
+ "loss": 0.6148,
+ "step": 2227
+ },
+ {
+ "epoch": 2.228626801287862,
+ "grad_norm": 0.5456231236457825,
+ "learning_rate": 3.076828323586941e-05,
+ "loss": 0.6804,
+ "step": 2228
+ },
+ {
+ "epoch": 2.229627082616986,
+ "grad_norm": 0.5233959555625916,
+ "learning_rate": 3.06926805683603e-05,
+ "loss": 0.5888,
+ "step": 2229
+ },
+ {
+ "epoch": 2.23062736394611,
+ "grad_norm": 0.5444768071174622,
+ "learning_rate": 3.061715405706106e-05,
+ "loss": 0.7607,
+ "step": 2230
+ },
+ {
+ "epoch": 2.2316276452752337,
+ "grad_norm": 0.5582504272460938,
+ "learning_rate": 3.0541703784961615e-05,
+ "loss": 0.613,
+ "step": 2231
+ },
+ {
+ "epoch": 2.2326279266043576,
+ "grad_norm": 0.5025148987770081,
+ "learning_rate": 3.0466329834968233e-05,
+ "loss": 0.6876,
+ "step": 2232
+ },
+ {
+ "epoch": 2.2336282079334815,
+ "grad_norm": 0.5344957709312439,
+ "learning_rate": 3.0391032289903188e-05,
+ "loss": 0.7175,
+ "step": 2233
+ },
+ {
+ "epoch": 2.234628489262605,
+ "grad_norm": 0.4237043559551239,
+ "learning_rate": 3.0315811232504922e-05,
+ "loss": 0.5648,
+ "step": 2234
+ },
+ {
+ "epoch": 2.235628770591729,
+ "grad_norm": 0.4444836378097534,
+ "learning_rate": 3.0240666745427713e-05,
+ "loss": 0.494,
+ "step": 2235
+ },
+ {
+ "epoch": 2.2366290519208527,
+ "grad_norm": 0.46955639123916626,
+ "learning_rate": 3.0165598911241832e-05,
+ "loss": 0.4465,
+ "step": 2236
+ },
+ {
+ "epoch": 2.2376293332499766,
+ "grad_norm": 0.49513357877731323,
+ "learning_rate": 3.009060781243319e-05,
+ "loss": 0.6519,
+ "step": 2237
+ },
+ {
+ "epoch": 2.2386296145791005,
+ "grad_norm": 0.4216475784778595,
+ "learning_rate": 3.0015693531403465e-05,
+ "loss": 0.5114,
+ "step": 2238
+ },
+ {
+ "epoch": 2.2396298959082244,
+ "grad_norm": 0.5368056297302246,
+ "learning_rate": 2.994085615046993e-05,
+ "loss": 0.659,
+ "step": 2239
+ },
+ {
+ "epoch": 2.240630177237348,
+ "grad_norm": 0.4923858642578125,
+ "learning_rate": 2.9866095751865297e-05,
+ "loss": 0.7238,
+ "step": 2240
+ },
+ {
+ "epoch": 2.2416304585664717,
+ "grad_norm": 0.5030984282493591,
+ "learning_rate": 2.979141241773775e-05,
+ "loss": 0.6715,
+ "step": 2241
+ },
+ {
+ "epoch": 2.2426307398955956,
+ "grad_norm": 0.5050022006034851,
+ "learning_rate": 2.971680623015074e-05,
+ "loss": 0.5918,
+ "step": 2242
+ },
+ {
+ "epoch": 2.2436310212247195,
+ "grad_norm": 0.5575593709945679,
+ "learning_rate": 2.9642277271083008e-05,
+ "loss": 0.5425,
+ "step": 2243
+ },
+ {
+ "epoch": 2.2446313025538434,
+ "grad_norm": 0.4873676300048828,
+ "learning_rate": 2.9567825622428358e-05,
+ "loss": 0.6573,
+ "step": 2244
+ },
+ {
+ "epoch": 2.2456315838829672,
+ "grad_norm": 0.4244104325771332,
+ "learning_rate": 2.9493451365995737e-05,
+ "loss": 0.619,
+ "step": 2245
+ },
+ {
+ "epoch": 2.246631865212091,
+ "grad_norm": 0.5072455406188965,
+ "learning_rate": 2.9419154583508978e-05,
+ "loss": 0.6958,
+ "step": 2246
+ },
+ {
+ "epoch": 2.2476321465412146,
+ "grad_norm": 0.4528377950191498,
+ "learning_rate": 2.9344935356606773e-05,
+ "loss": 0.5586,
+ "step": 2247
+ },
+ {
+ "epoch": 2.2486324278703385,
+ "grad_norm": 0.46183013916015625,
+ "learning_rate": 2.9270793766842697e-05,
+ "loss": 0.5195,
+ "step": 2248
+ },
+ {
+ "epoch": 2.2496327091994623,
+ "grad_norm": 0.5115411281585693,
+ "learning_rate": 2.9196729895684884e-05,
+ "loss": 0.6447,
+ "step": 2249
+ },
+ {
+ "epoch": 2.2506329905285862,
+ "grad_norm": 0.44066107273101807,
+ "learning_rate": 2.9122743824516195e-05,
+ "loss": 0.5917,
+ "step": 2250
+ },
+ {
+ "epoch": 2.25163327185771,
+ "grad_norm": 0.4783106744289398,
+ "learning_rate": 2.9048835634633887e-05,
+ "loss": 0.5601,
+ "step": 2251
+ },
+ {
+ "epoch": 2.2526335531868336,
+ "grad_norm": 0.46325576305389404,
+ "learning_rate": 2.897500540724972e-05,
+ "loss": 0.6595,
+ "step": 2252
+ },
+ {
+ "epoch": 2.2536338345159574,
+ "grad_norm": 0.4388025403022766,
+ "learning_rate": 2.8901253223489754e-05,
+ "loss": 0.5466,
+ "step": 2253
+ },
+ {
+ "epoch": 2.2546341158450813,
+ "grad_norm": 0.5207952857017517,
+ "learning_rate": 2.8827579164394347e-05,
+ "loss": 0.7255,
+ "step": 2254
+ },
+ {
+ "epoch": 2.255634397174205,
+ "grad_norm": 0.5066066384315491,
+ "learning_rate": 2.875398331091792e-05,
+ "loss": 0.6495,
+ "step": 2255
+ },
+ {
+ "epoch": 2.256634678503329,
+ "grad_norm": 0.577724277973175,
+ "learning_rate": 2.8680465743928985e-05,
+ "loss": 0.6658,
+ "step": 2256
+ },
+ {
+ "epoch": 2.257634959832453,
+ "grad_norm": 0.4669063985347748,
+ "learning_rate": 2.8607026544210114e-05,
+ "loss": 0.582,
+ "step": 2257
+ },
+ {
+ "epoch": 2.258635241161577,
+ "grad_norm": 0.4934767186641693,
+ "learning_rate": 2.8533665792457644e-05,
+ "loss": 0.7237,
+ "step": 2258
+ },
+ {
+ "epoch": 2.2596355224907003,
+ "grad_norm": 0.44358426332473755,
+ "learning_rate": 2.8460383569281824e-05,
+ "loss": 0.5016,
+ "step": 2259
+ },
+ {
+ "epoch": 2.260635803819824,
+ "grad_norm": 0.5185582637786865,
+ "learning_rate": 2.8387179955206523e-05,
+ "loss": 0.6666,
+ "step": 2260
+ },
+ {
+ "epoch": 2.261636085148948,
+ "grad_norm": 0.5082037448883057,
+ "learning_rate": 2.831405503066932e-05,
+ "loss": 0.7377,
+ "step": 2261
+ },
+ {
+ "epoch": 2.262636366478072,
+ "grad_norm": 0.47630825638771057,
+ "learning_rate": 2.8241008876021215e-05,
+ "loss": 0.5947,
+ "step": 2262
+ },
+ {
+ "epoch": 2.263636647807196,
+ "grad_norm": 0.5042298436164856,
+ "learning_rate": 2.8168041571526805e-05,
+ "loss": 0.6501,
+ "step": 2263
+ },
+ {
+ "epoch": 2.2646369291363198,
+ "grad_norm": 0.4552183747291565,
+ "learning_rate": 2.8095153197363887e-05,
+ "loss": 0.5852,
+ "step": 2264
+ },
+ {
+ "epoch": 2.265637210465443,
+ "grad_norm": 0.4342525005340576,
+ "learning_rate": 2.8022343833623666e-05,
+ "loss": 0.5362,
+ "step": 2265
+ },
+ {
+ "epoch": 2.266637491794567,
+ "grad_norm": 0.4309101700782776,
+ "learning_rate": 2.7949613560310438e-05,
+ "loss": 0.4905,
+ "step": 2266
+ },
+ {
+ "epoch": 2.267637773123691,
+ "grad_norm": 0.5703599452972412,
+ "learning_rate": 2.787696245734155e-05,
+ "loss": 0.6974,
+ "step": 2267
+ },
+ {
+ "epoch": 2.268638054452815,
+ "grad_norm": 0.5007729530334473,
+ "learning_rate": 2.7804390604547557e-05,
+ "loss": 0.6958,
+ "step": 2268
+ },
+ {
+ "epoch": 2.2696383357819387,
+ "grad_norm": 0.47054824233055115,
+ "learning_rate": 2.7731898081671702e-05,
+ "loss": 0.5988,
+ "step": 2269
+ },
+ {
+ "epoch": 2.2706386171110626,
+ "grad_norm": 0.4500153362751007,
+ "learning_rate": 2.765948496837022e-05,
+ "loss": 0.6857,
+ "step": 2270
+ },
+ {
+ "epoch": 2.2716388984401865,
+ "grad_norm": 0.5590565204620361,
+ "learning_rate": 2.758715134421197e-05,
+ "loss": 0.6839,
+ "step": 2271
+ },
+ {
+ "epoch": 2.27263917976931,
+ "grad_norm": 0.486512690782547,
+ "learning_rate": 2.7514897288678578e-05,
+ "loss": 0.6154,
+ "step": 2272
+ },
+ {
+ "epoch": 2.273639461098434,
+ "grad_norm": 0.48422694206237793,
+ "learning_rate": 2.744272288116416e-05,
+ "loss": 0.6642,
+ "step": 2273
+ },
+ {
+ "epoch": 2.2746397424275577,
+ "grad_norm": 0.4691951870918274,
+ "learning_rate": 2.7370628200975302e-05,
+ "loss": 0.6612,
+ "step": 2274
+ },
+ {
+ "epoch": 2.2756400237566816,
+ "grad_norm": 0.4122920036315918,
+ "learning_rate": 2.729861332733108e-05,
+ "loss": 0.491,
+ "step": 2275
+ },
+ {
+ "epoch": 2.2766403050858055,
+ "grad_norm": 0.4303779602050781,
+ "learning_rate": 2.7226678339362755e-05,
+ "loss": 0.5108,
+ "step": 2276
+ },
+ {
+ "epoch": 2.2776405864149294,
+ "grad_norm": 0.45343050360679626,
+ "learning_rate": 2.7154823316113932e-05,
+ "loss": 0.5594,
+ "step": 2277
+ },
+ {
+ "epoch": 2.278640867744053,
+ "grad_norm": 0.4414820969104767,
+ "learning_rate": 2.708304833654023e-05,
+ "loss": 0.563,
+ "step": 2278
+ },
+ {
+ "epoch": 2.2796411490731767,
+ "grad_norm": 0.45219919085502625,
+ "learning_rate": 2.7011353479509426e-05,
+ "loss": 0.5469,
+ "step": 2279
+ },
+ {
+ "epoch": 2.2806414304023006,
+ "grad_norm": 0.4790736734867096,
+ "learning_rate": 2.693973882380114e-05,
+ "loss": 0.7291,
+ "step": 2280
+ },
+ {
+ "epoch": 2.2816417117314245,
+ "grad_norm": 0.4839097261428833,
+ "learning_rate": 2.686820444810696e-05,
+ "loss": 0.4779,
+ "step": 2281
+ },
+ {
+ "epoch": 2.2826419930605484,
+ "grad_norm": 0.4934631586074829,
+ "learning_rate": 2.679675043103026e-05,
+ "loss": 0.6304,
+ "step": 2282
+ },
+ {
+ "epoch": 2.2836422743896723,
+ "grad_norm": 0.5528481006622314,
+ "learning_rate": 2.6725376851086025e-05,
+ "loss": 0.7422,
+ "step": 2283
+ },
+ {
+ "epoch": 2.284642555718796,
+ "grad_norm": 0.45739200711250305,
+ "learning_rate": 2.6654083786700955e-05,
+ "loss": 0.5069,
+ "step": 2284
+ },
+ {
+ "epoch": 2.2856428370479196,
+ "grad_norm": 0.4885886013507843,
+ "learning_rate": 2.6582871316213198e-05,
+ "loss": 0.6197,
+ "step": 2285
+ },
+ {
+ "epoch": 2.2866431183770435,
+ "grad_norm": 0.518183171749115,
+ "learning_rate": 2.6511739517872426e-05,
+ "loss": 0.5722,
+ "step": 2286
+ },
+ {
+ "epoch": 2.2876433997061674,
+ "grad_norm": 0.4652218520641327,
+ "learning_rate": 2.644068846983956e-05,
+ "loss": 0.5164,
+ "step": 2287
+ },
+ {
+ "epoch": 2.2886436810352913,
+ "grad_norm": 0.48020297288894653,
+ "learning_rate": 2.6369718250186914e-05,
+ "loss": 0.6531,
+ "step": 2288
+ },
+ {
+ "epoch": 2.289643962364415,
+ "grad_norm": 0.386658251285553,
+ "learning_rate": 2.6298828936897867e-05,
+ "loss": 0.524,
+ "step": 2289
+ },
+ {
+ "epoch": 2.2906442436935386,
+ "grad_norm": 0.5391028523445129,
+ "learning_rate": 2.622802060786702e-05,
+ "loss": 0.741,
+ "step": 2290
+ },
+ {
+ "epoch": 2.2916445250226625,
+ "grad_norm": 0.44360673427581787,
+ "learning_rate": 2.6157293340899857e-05,
+ "loss": 0.5736,
+ "step": 2291
+ },
+ {
+ "epoch": 2.2926448063517864,
+ "grad_norm": 0.4551480710506439,
+ "learning_rate": 2.60866472137129e-05,
+ "loss": 0.5947,
+ "step": 2292
+ },
+ {
+ "epoch": 2.2936450876809102,
+ "grad_norm": 0.4541544020175934,
+ "learning_rate": 2.6016082303933454e-05,
+ "loss": 0.5172,
+ "step": 2293
+ },
+ {
+ "epoch": 2.294645369010034,
+ "grad_norm": 0.5024133920669556,
+ "learning_rate": 2.594559868909956e-05,
+ "loss": 0.6832,
+ "step": 2294
+ },
+ {
+ "epoch": 2.295645650339158,
+ "grad_norm": 0.5044113993644714,
+ "learning_rate": 2.587519644666001e-05,
+ "loss": 0.5062,
+ "step": 2295
+ },
+ {
+ "epoch": 2.296645931668282,
+ "grad_norm": 0.5235409736633301,
+ "learning_rate": 2.580487565397406e-05,
+ "loss": 0.6371,
+ "step": 2296
+ },
+ {
+ "epoch": 2.2976462129974053,
+ "grad_norm": 0.5046529769897461,
+ "learning_rate": 2.573463638831166e-05,
+ "loss": 0.5736,
+ "step": 2297
+ },
+ {
+ "epoch": 2.2986464943265292,
+ "grad_norm": 0.44103822112083435,
+ "learning_rate": 2.566447872685298e-05,
+ "loss": 0.6557,
+ "step": 2298
+ },
+ {
+ "epoch": 2.299646775655653,
+ "grad_norm": 0.5476238131523132,
+ "learning_rate": 2.559440274668864e-05,
+ "loss": 0.7338,
+ "step": 2299
+ },
+ {
+ "epoch": 2.300647056984777,
+ "grad_norm": 0.5168704986572266,
+ "learning_rate": 2.5524408524819453e-05,
+ "loss": 0.6034,
+ "step": 2300
+ },
+ {
+ "epoch": 2.301647338313901,
+ "grad_norm": 0.4194817543029785,
+ "learning_rate": 2.545449613815639e-05,
+ "loss": 0.56,
+ "step": 2301
+ },
+ {
+ "epoch": 2.302647619643025,
+ "grad_norm": 0.43994396924972534,
+ "learning_rate": 2.5384665663520558e-05,
+ "loss": 0.6744,
+ "step": 2302
+ },
+ {
+ "epoch": 2.303647900972148,
+ "grad_norm": 0.4471636116504669,
+ "learning_rate": 2.5314917177642972e-05,
+ "loss": 0.703,
+ "step": 2303
+ },
+ {
+ "epoch": 2.304648182301272,
+ "grad_norm": 0.48795682191848755,
+ "learning_rate": 2.5245250757164663e-05,
+ "loss": 0.7916,
+ "step": 2304
+ },
+ {
+ "epoch": 2.305648463630396,
+ "grad_norm": 0.5703234076499939,
+ "learning_rate": 2.5175666478636374e-05,
+ "loss": 0.7299,
+ "step": 2305
+ },
+ {
+ "epoch": 2.30664874495952,
+ "grad_norm": 0.532819926738739,
+ "learning_rate": 2.5106164418518686e-05,
+ "loss": 0.7058,
+ "step": 2306
+ },
+ {
+ "epoch": 2.3076490262886438,
+ "grad_norm": 0.4922640919685364,
+ "learning_rate": 2.5036744653181753e-05,
+ "loss": 0.708,
+ "step": 2307
+ },
+ {
+ "epoch": 2.3086493076177677,
+ "grad_norm": 0.4729764461517334,
+ "learning_rate": 2.4967407258905385e-05,
+ "loss": 0.6014,
+ "step": 2308
+ },
+ {
+ "epoch": 2.3096495889468915,
+ "grad_norm": 0.48904159665107727,
+ "learning_rate": 2.48981523118788e-05,
+ "loss": 0.6238,
+ "step": 2309
+ },
+ {
+ "epoch": 2.310649870276015,
+ "grad_norm": 0.49575427174568176,
+ "learning_rate": 2.4828979888200698e-05,
+ "loss": 0.7582,
+ "step": 2310
+ },
+ {
+ "epoch": 2.311650151605139,
+ "grad_norm": 0.5098155736923218,
+ "learning_rate": 2.475989006387901e-05,
+ "loss": 0.6547,
+ "step": 2311
+ },
+ {
+ "epoch": 2.3126504329342628,
+ "grad_norm": 0.45581546425819397,
+ "learning_rate": 2.4690882914831004e-05,
+ "loss": 0.6139,
+ "step": 2312
+ },
+ {
+ "epoch": 2.3136507142633866,
+ "grad_norm": 0.5907835960388184,
+ "learning_rate": 2.462195851688306e-05,
+ "loss": 0.5946,
+ "step": 2313
+ },
+ {
+ "epoch": 2.3146509955925105,
+ "grad_norm": 0.46699121594429016,
+ "learning_rate": 2.4553116945770583e-05,
+ "loss": 0.5983,
+ "step": 2314
+ },
+ {
+ "epoch": 2.315651276921634,
+ "grad_norm": 0.47327184677124023,
+ "learning_rate": 2.4484358277138065e-05,
+ "loss": 0.6508,
+ "step": 2315
+ },
+ {
+ "epoch": 2.316651558250758,
+ "grad_norm": 0.4823262095451355,
+ "learning_rate": 2.441568258653879e-05,
+ "loss": 0.6737,
+ "step": 2316
+ },
+ {
+ "epoch": 2.3176518395798817,
+ "grad_norm": 0.49834492802619934,
+ "learning_rate": 2.4347089949434988e-05,
+ "loss": 0.6298,
+ "step": 2317
+ },
+ {
+ "epoch": 2.3186521209090056,
+ "grad_norm": 0.49446576833724976,
+ "learning_rate": 2.4278580441197484e-05,
+ "loss": 0.659,
+ "step": 2318
+ },
+ {
+ "epoch": 2.3196524022381295,
+ "grad_norm": 0.4349921643733978,
+ "learning_rate": 2.421015413710591e-05,
+ "loss": 0.6368,
+ "step": 2319
+ },
+ {
+ "epoch": 2.3206526835672534,
+ "grad_norm": 0.6123060584068298,
+ "learning_rate": 2.4141811112348377e-05,
+ "loss": 0.6668,
+ "step": 2320
+ },
+ {
+ "epoch": 2.3216529648963773,
+ "grad_norm": 0.46083199977874756,
+ "learning_rate": 2.407355144202147e-05,
+ "loss": 0.6067,
+ "step": 2321
+ },
+ {
+ "epoch": 2.3226532462255007,
+ "grad_norm": 0.5320808291435242,
+ "learning_rate": 2.4005375201130274e-05,
+ "loss": 0.5295,
+ "step": 2322
+ },
+ {
+ "epoch": 2.3236535275546246,
+ "grad_norm": 0.616462767124176,
+ "learning_rate": 2.3937282464588108e-05,
+ "loss": 0.8035,
+ "step": 2323
+ },
+ {
+ "epoch": 2.3246538088837485,
+ "grad_norm": 0.5211688280105591,
+ "learning_rate": 2.3869273307216612e-05,
+ "loss": 0.7214,
+ "step": 2324
+ },
+ {
+ "epoch": 2.3256540902128724,
+ "grad_norm": 0.494314044713974,
+ "learning_rate": 2.3801347803745512e-05,
+ "loss": 0.7419,
+ "step": 2325
+ },
+ {
+ "epoch": 2.3266543715419963,
+ "grad_norm": 0.5524937510490417,
+ "learning_rate": 2.3733506028812658e-05,
+ "loss": 0.7203,
+ "step": 2326
+ },
+ {
+ "epoch": 2.32765465287112,
+ "grad_norm": 0.5332032442092896,
+ "learning_rate": 2.3665748056963956e-05,
+ "loss": 0.6084,
+ "step": 2327
+ },
+ {
+ "epoch": 2.3286549342002436,
+ "grad_norm": 0.49110063910484314,
+ "learning_rate": 2.3598073962653066e-05,
+ "loss": 0.5949,
+ "step": 2328
+ },
+ {
+ "epoch": 2.3296552155293675,
+ "grad_norm": 0.41060465574264526,
+ "learning_rate": 2.3530483820241656e-05,
+ "loss": 0.5049,
+ "step": 2329
+ },
+ {
+ "epoch": 2.3306554968584914,
+ "grad_norm": 0.4867851436138153,
+ "learning_rate": 2.3462977703999023e-05,
+ "loss": 0.6684,
+ "step": 2330
+ },
+ {
+ "epoch": 2.3316557781876153,
+ "grad_norm": 0.4757525622844696,
+ "learning_rate": 2.339555568810221e-05,
+ "loss": 0.4925,
+ "step": 2331
+ },
+ {
+ "epoch": 2.332656059516739,
+ "grad_norm": 0.4445713758468628,
+ "learning_rate": 2.332821784663578e-05,
+ "loss": 0.4718,
+ "step": 2332
+ },
+ {
+ "epoch": 2.333656340845863,
+ "grad_norm": 0.48554399609565735,
+ "learning_rate": 2.3260964253591898e-05,
+ "loss": 0.574,
+ "step": 2333
+ },
+ {
+ "epoch": 2.334656622174987,
+ "grad_norm": 0.5004045963287354,
+ "learning_rate": 2.3193794982870044e-05,
+ "loss": 0.5835,
+ "step": 2334
+ },
+ {
+ "epoch": 2.3356569035041104,
+ "grad_norm": 0.511870801448822,
+ "learning_rate": 2.312671010827715e-05,
+ "loss": 0.5677,
+ "step": 2335
+ },
+ {
+ "epoch": 2.3366571848332343,
+ "grad_norm": 0.4409622251987457,
+ "learning_rate": 2.30597097035273e-05,
+ "loss": 0.6213,
+ "step": 2336
+ },
+ {
+ "epoch": 2.337657466162358,
+ "grad_norm": 0.5014410614967346,
+ "learning_rate": 2.29927938422419e-05,
+ "loss": 0.5394,
+ "step": 2337
+ },
+ {
+ "epoch": 2.338657747491482,
+ "grad_norm": 0.572220504283905,
+ "learning_rate": 2.2925962597949302e-05,
+ "loss": 0.7122,
+ "step": 2338
+ },
+ {
+ "epoch": 2.339658028820606,
+ "grad_norm": 0.5024709105491638,
+ "learning_rate": 2.285921604408502e-05,
+ "loss": 0.6339,
+ "step": 2339
+ },
+ {
+ "epoch": 2.34065831014973,
+ "grad_norm": 0.5063747763633728,
+ "learning_rate": 2.2792554253991415e-05,
+ "loss": 0.6029,
+ "step": 2340
+ },
+ {
+ "epoch": 2.3416585914788532,
+ "grad_norm": 0.44128766655921936,
+ "learning_rate": 2.272597730091769e-05,
+ "loss": 0.522,
+ "step": 2341
+ },
+ {
+ "epoch": 2.342658872807977,
+ "grad_norm": 0.45375195145606995,
+ "learning_rate": 2.2659485258019976e-05,
+ "loss": 0.605,
+ "step": 2342
+ },
+ {
+ "epoch": 2.343659154137101,
+ "grad_norm": 0.4744661748409271,
+ "learning_rate": 2.259307819836093e-05,
+ "loss": 0.6479,
+ "step": 2343
+ },
+ {
+ "epoch": 2.344659435466225,
+ "grad_norm": 0.49682337045669556,
+ "learning_rate": 2.252675619490996e-05,
+ "loss": 0.6434,
+ "step": 2344
+ },
+ {
+ "epoch": 2.345659716795349,
+ "grad_norm": 0.5090720653533936,
+ "learning_rate": 2.2460519320542883e-05,
+ "loss": 0.6712,
+ "step": 2345
+ },
+ {
+ "epoch": 2.3466599981244727,
+ "grad_norm": 0.4929216206073761,
+ "learning_rate": 2.2394367648042102e-05,
+ "loss": 0.6301,
+ "step": 2346
+ },
+ {
+ "epoch": 2.3476602794535966,
+ "grad_norm": 0.45321589708328247,
+ "learning_rate": 2.2328301250096327e-05,
+ "loss": 0.5003,
+ "step": 2347
+ },
+ {
+ "epoch": 2.34866056078272,
+ "grad_norm": 0.5079351663589478,
+ "learning_rate": 2.2262320199300557e-05,
+ "loss": 0.5639,
+ "step": 2348
+ },
+ {
+ "epoch": 2.349660842111844,
+ "grad_norm": 0.459007203578949,
+ "learning_rate": 2.2196424568156073e-05,
+ "loss": 0.5999,
+ "step": 2349
+ },
+ {
+ "epoch": 2.350661123440968,
+ "grad_norm": 0.44086092710494995,
+ "learning_rate": 2.2130614429070207e-05,
+ "loss": 0.5651,
+ "step": 2350
+ },
+ {
+ "epoch": 2.3516614047700917,
+ "grad_norm": 0.4631021320819855,
+ "learning_rate": 2.206488985435645e-05,
+ "loss": 0.5437,
+ "step": 2351
+ },
+ {
+ "epoch": 2.3526616860992156,
+ "grad_norm": 0.5214501619338989,
+ "learning_rate": 2.199925091623418e-05,
+ "loss": 0.712,
+ "step": 2352
+ },
+ {
+ "epoch": 2.353661967428339,
+ "grad_norm": 0.4480469524860382,
+ "learning_rate": 2.193369768682877e-05,
+ "loss": 0.559,
+ "step": 2353
+ },
+ {
+ "epoch": 2.354662248757463,
+ "grad_norm": 0.48840999603271484,
+ "learning_rate": 2.1868230238171293e-05,
+ "loss": 0.5665,
+ "step": 2354
+ },
+ {
+ "epoch": 2.3556625300865868,
+ "grad_norm": 0.48083189129829407,
+ "learning_rate": 2.1802848642198692e-05,
+ "loss": 0.6433,
+ "step": 2355
+ },
+ {
+ "epoch": 2.3566628114157107,
+ "grad_norm": 0.5179978609085083,
+ "learning_rate": 2.1737552970753526e-05,
+ "loss": 0.6703,
+ "step": 2356
+ },
+ {
+ "epoch": 2.3576630927448345,
+ "grad_norm": 0.45892852544784546,
+ "learning_rate": 2.1672343295583873e-05,
+ "loss": 0.5417,
+ "step": 2357
+ },
+ {
+ "epoch": 2.3586633740739584,
+ "grad_norm": 0.5100318789482117,
+ "learning_rate": 2.160721968834344e-05,
+ "loss": 0.6418,
+ "step": 2358
+ },
+ {
+ "epoch": 2.3596636554030823,
+ "grad_norm": 0.5485228300094604,
+ "learning_rate": 2.154218222059122e-05,
+ "loss": 0.5995,
+ "step": 2359
+ },
+ {
+ "epoch": 2.3606639367322058,
+ "grad_norm": 0.4661252200603485,
+ "learning_rate": 2.1477230963791706e-05,
+ "loss": 0.5304,
+ "step": 2360
+ },
+ {
+ "epoch": 2.3616642180613296,
+ "grad_norm": 0.45510804653167725,
+ "learning_rate": 2.141236598931451e-05,
+ "loss": 0.5799,
+ "step": 2361
+ },
+ {
+ "epoch": 2.3626644993904535,
+ "grad_norm": 0.5123688578605652,
+ "learning_rate": 2.1347587368434575e-05,
+ "loss": 0.6454,
+ "step": 2362
+ },
+ {
+ "epoch": 2.3636647807195774,
+ "grad_norm": 0.4892440736293793,
+ "learning_rate": 2.1282895172331817e-05,
+ "loss": 0.6505,
+ "step": 2363
+ },
+ {
+ "epoch": 2.3646650620487013,
+ "grad_norm": 0.44063228368759155,
+ "learning_rate": 2.1218289472091336e-05,
+ "loss": 0.4452,
+ "step": 2364
+ },
+ {
+ "epoch": 2.365665343377825,
+ "grad_norm": 0.48182591795921326,
+ "learning_rate": 2.115377033870305e-05,
+ "loss": 0.5295,
+ "step": 2365
+ },
+ {
+ "epoch": 2.3666656247069486,
+ "grad_norm": 0.5330935716629028,
+ "learning_rate": 2.1089337843061863e-05,
+ "loss": 0.7468,
+ "step": 2366
+ },
+ {
+ "epoch": 2.3676659060360725,
+ "grad_norm": 0.4640701413154602,
+ "learning_rate": 2.102499205596743e-05,
+ "loss": 0.5936,
+ "step": 2367
+ },
+ {
+ "epoch": 2.3686661873651964,
+ "grad_norm": 0.40019840002059937,
+ "learning_rate": 2.0960733048124083e-05,
+ "loss": 0.489,
+ "step": 2368
+ },
+ {
+ "epoch": 2.3696664686943203,
+ "grad_norm": 0.5035619139671326,
+ "learning_rate": 2.0896560890140913e-05,
+ "loss": 0.6878,
+ "step": 2369
+ },
+ {
+ "epoch": 2.370666750023444,
+ "grad_norm": 0.46381524205207825,
+ "learning_rate": 2.0832475652531447e-05,
+ "loss": 0.603,
+ "step": 2370
+ },
+ {
+ "epoch": 2.371667031352568,
+ "grad_norm": 0.48167362809181213,
+ "learning_rate": 2.076847740571387e-05,
+ "loss": 0.4978,
+ "step": 2371
+ },
+ {
+ "epoch": 2.372667312681692,
+ "grad_norm": 0.5534481406211853,
+ "learning_rate": 2.070456622001059e-05,
+ "loss": 0.5736,
+ "step": 2372
+ },
+ {
+ "epoch": 2.3736675940108154,
+ "grad_norm": 0.47841575741767883,
+ "learning_rate": 2.064074216564852e-05,
+ "loss": 0.6201,
+ "step": 2373
+ },
+ {
+ "epoch": 2.3746678753399393,
+ "grad_norm": 0.5568225383758545,
+ "learning_rate": 2.0577005312758703e-05,
+ "loss": 0.7379,
+ "step": 2374
+ },
+ {
+ "epoch": 2.375668156669063,
+ "grad_norm": 0.5010125637054443,
+ "learning_rate": 2.0513355731376395e-05,
+ "loss": 0.6665,
+ "step": 2375
+ },
+ {
+ "epoch": 2.376668437998187,
+ "grad_norm": 0.5012779831886292,
+ "learning_rate": 2.0449793491441028e-05,
+ "loss": 0.6885,
+ "step": 2376
+ },
+ {
+ "epoch": 2.377668719327311,
+ "grad_norm": 0.4700705409049988,
+ "learning_rate": 2.0386318662795957e-05,
+ "loss": 0.7168,
+ "step": 2377
+ },
+ {
+ "epoch": 2.3786690006564344,
+ "grad_norm": 0.5078738927841187,
+ "learning_rate": 2.0322931315188586e-05,
+ "loss": 0.5534,
+ "step": 2378
+ },
+ {
+ "epoch": 2.3796692819855583,
+ "grad_norm": 0.4546999931335449,
+ "learning_rate": 2.0259631518270105e-05,
+ "loss": 0.6172,
+ "step": 2379
+ },
+ {
+ "epoch": 2.380669563314682,
+ "grad_norm": 0.4790453314781189,
+ "learning_rate": 2.0196419341595595e-05,
+ "loss": 0.5531,
+ "step": 2380
+ },
+ {
+ "epoch": 2.381669844643806,
+ "grad_norm": 0.4022303819656372,
+ "learning_rate": 2.013329485462374e-05,
+ "loss": 0.5742,
+ "step": 2381
+ },
+ {
+ "epoch": 2.38267012597293,
+ "grad_norm": 0.4902719557285309,
+ "learning_rate": 2.0070258126717e-05,
+ "loss": 0.6463,
+ "step": 2382
+ },
+ {
+ "epoch": 2.383670407302054,
+ "grad_norm": 0.4552217721939087,
+ "learning_rate": 2.000730922714128e-05,
+ "loss": 0.5703,
+ "step": 2383
+ },
+ {
+ "epoch": 2.3846706886311777,
+ "grad_norm": 0.5057043433189392,
+ "learning_rate": 1.9944448225066093e-05,
+ "loss": 0.6637,
+ "step": 2384
+ },
+ {
+ "epoch": 2.385670969960301,
+ "grad_norm": 0.5370767712593079,
+ "learning_rate": 1.9881675189564254e-05,
+ "loss": 0.6248,
+ "step": 2385
+ },
+ {
+ "epoch": 2.386671251289425,
+ "grad_norm": 0.5089964866638184,
+ "learning_rate": 1.981899018961202e-05,
+ "loss": 0.5206,
+ "step": 2386
+ },
+ {
+ "epoch": 2.387671532618549,
+ "grad_norm": 0.5276069641113281,
+ "learning_rate": 1.975639329408887e-05,
+ "loss": 0.5773,
+ "step": 2387
+ },
+ {
+ "epoch": 2.388671813947673,
+ "grad_norm": 0.4586690664291382,
+ "learning_rate": 1.9693884571777432e-05,
+ "loss": 0.4942,
+ "step": 2388
+ },
+ {
+ "epoch": 2.3896720952767967,
+ "grad_norm": 0.4275995194911957,
+ "learning_rate": 1.963146409136354e-05,
+ "loss": 0.5222,
+ "step": 2389
+ },
+ {
+ "epoch": 2.3906723766059206,
+ "grad_norm": 0.46232300996780396,
+ "learning_rate": 1.9569131921435956e-05,
+ "loss": 0.5835,
+ "step": 2390
+ },
+ {
+ "epoch": 2.391672657935044,
+ "grad_norm": 0.4702429175376892,
+ "learning_rate": 1.950688813048652e-05,
+ "loss": 0.5547,
+ "step": 2391
+ },
+ {
+ "epoch": 2.392672939264168,
+ "grad_norm": 0.45481425523757935,
+ "learning_rate": 1.944473278690986e-05,
+ "loss": 0.5783,
+ "step": 2392
+ },
+ {
+ "epoch": 2.393673220593292,
+ "grad_norm": 0.4588642120361328,
+ "learning_rate": 1.9382665959003477e-05,
+ "loss": 0.6411,
+ "step": 2393
+ },
+ {
+ "epoch": 2.3946735019224157,
+ "grad_norm": 0.4446251690387726,
+ "learning_rate": 1.93206877149676e-05,
+ "loss": 0.5831,
+ "step": 2394
+ },
+ {
+ "epoch": 2.3956737832515396,
+ "grad_norm": 0.48509830236434937,
+ "learning_rate": 1.9258798122905064e-05,
+ "loss": 0.5913,
+ "step": 2395
+ },
+ {
+ "epoch": 2.3966740645806635,
+ "grad_norm": 0.4828680753707886,
+ "learning_rate": 1.9196997250821392e-05,
+ "loss": 0.6255,
+ "step": 2396
+ },
+ {
+ "epoch": 2.3976743459097873,
+ "grad_norm": 0.5534887909889221,
+ "learning_rate": 1.913528516662452e-05,
+ "loss": 0.7288,
+ "step": 2397
+ },
+ {
+ "epoch": 2.398674627238911,
+ "grad_norm": 0.41673797369003296,
+ "learning_rate": 1.907366193812491e-05,
+ "loss": 0.5078,
+ "step": 2398
+ },
+ {
+ "epoch": 2.3996749085680347,
+ "grad_norm": 0.4955064356327057,
+ "learning_rate": 1.9012127633035305e-05,
+ "loss": 0.5843,
+ "step": 2399
+ },
+ {
+ "epoch": 2.4006751898971586,
+ "grad_norm": 0.6254858374595642,
+ "learning_rate": 1.895068231897079e-05,
+ "loss": 0.6269,
+ "step": 2400
+ },
+ {
+ "epoch": 2.4016754712262824,
+ "grad_norm": 0.5201045870780945,
+ "learning_rate": 1.8889326063448697e-05,
+ "loss": 0.521,
+ "step": 2401
+ },
+ {
+ "epoch": 2.4026757525554063,
+ "grad_norm": 0.49939653277397156,
+ "learning_rate": 1.8828058933888392e-05,
+ "loss": 0.718,
+ "step": 2402
+ },
+ {
+ "epoch": 2.40367603388453,
+ "grad_norm": 0.45866259932518005,
+ "learning_rate": 1.8766880997611424e-05,
+ "loss": 0.6565,
+ "step": 2403
+ },
+ {
+ "epoch": 2.4046763152136537,
+ "grad_norm": 0.5090838670730591,
+ "learning_rate": 1.870579232184122e-05,
+ "loss": 0.5909,
+ "step": 2404
+ },
+ {
+ "epoch": 2.4056765965427775,
+ "grad_norm": 0.4334961175918579,
+ "learning_rate": 1.864479297370325e-05,
+ "loss": 0.4925,
+ "step": 2405
+ },
+ {
+ "epoch": 2.4066768778719014,
+ "grad_norm": 0.5367956757545471,
+ "learning_rate": 1.8583883020224724e-05,
+ "loss": 0.6032,
+ "step": 2406
+ },
+ {
+ "epoch": 2.4076771592010253,
+ "grad_norm": 0.4732288420200348,
+ "learning_rate": 1.8523062528334688e-05,
+ "loss": 0.5782,
+ "step": 2407
+ },
+ {
+ "epoch": 2.408677440530149,
+ "grad_norm": 0.5187519788742065,
+ "learning_rate": 1.8462331564863832e-05,
+ "loss": 0.6937,
+ "step": 2408
+ },
+ {
+ "epoch": 2.409677721859273,
+ "grad_norm": 0.47051140666007996,
+ "learning_rate": 1.8401690196544552e-05,
+ "loss": 0.7286,
+ "step": 2409
+ },
+ {
+ "epoch": 2.410678003188397,
+ "grad_norm": 0.49937713146209717,
+ "learning_rate": 1.834113849001069e-05,
+ "loss": 0.6367,
+ "step": 2410
+ },
+ {
+ "epoch": 2.4116782845175204,
+ "grad_norm": 0.5458667278289795,
+ "learning_rate": 1.8280676511797666e-05,
+ "loss": 0.6091,
+ "step": 2411
+ },
+ {
+ "epoch": 2.4126785658466443,
+ "grad_norm": 0.5090888142585754,
+ "learning_rate": 1.8220304328342252e-05,
+ "loss": 0.659,
+ "step": 2412
+ },
+ {
+ "epoch": 2.413678847175768,
+ "grad_norm": 0.4376786947250366,
+ "learning_rate": 1.8160022005982515e-05,
+ "loss": 0.5746,
+ "step": 2413
+ },
+ {
+ "epoch": 2.414679128504892,
+ "grad_norm": 0.43062934279441833,
+ "learning_rate": 1.8099829610957863e-05,
+ "loss": 0.5639,
+ "step": 2414
+ },
+ {
+ "epoch": 2.415679409834016,
+ "grad_norm": 0.4858124256134033,
+ "learning_rate": 1.8039727209408842e-05,
+ "loss": 0.6354,
+ "step": 2415
+ },
+ {
+ "epoch": 2.4166796911631394,
+ "grad_norm": 0.49024826288223267,
+ "learning_rate": 1.7979714867377152e-05,
+ "loss": 0.5691,
+ "step": 2416
+ },
+ {
+ "epoch": 2.4176799724922633,
+ "grad_norm": 0.593974769115448,
+ "learning_rate": 1.7919792650805455e-05,
+ "loss": 0.621,
+ "step": 2417
+ },
+ {
+ "epoch": 2.418680253821387,
+ "grad_norm": 0.5197362303733826,
+ "learning_rate": 1.7859960625537476e-05,
+ "loss": 0.6257,
+ "step": 2418
+ },
+ {
+ "epoch": 2.419680535150511,
+ "grad_norm": 0.4285022020339966,
+ "learning_rate": 1.7800218857317742e-05,
+ "loss": 0.6256,
+ "step": 2419
+ },
+ {
+ "epoch": 2.420680816479635,
+ "grad_norm": 0.4791402816772461,
+ "learning_rate": 1.774056741179171e-05,
+ "loss": 0.4882,
+ "step": 2420
+ },
+ {
+ "epoch": 2.421681097808759,
+ "grad_norm": 0.4530814290046692,
+ "learning_rate": 1.7681006354505493e-05,
+ "loss": 0.5457,
+ "step": 2421
+ },
+ {
+ "epoch": 2.4226813791378827,
+ "grad_norm": 0.4875739812850952,
+ "learning_rate": 1.7621535750905905e-05,
+ "loss": 0.7153,
+ "step": 2422
+ },
+ {
+ "epoch": 2.423681660467006,
+ "grad_norm": 0.4448545575141907,
+ "learning_rate": 1.756215566634043e-05,
+ "loss": 0.5714,
+ "step": 2423
+ },
+ {
+ "epoch": 2.42468194179613,
+ "grad_norm": 0.4434129595756531,
+ "learning_rate": 1.7502866166056986e-05,
+ "loss": 0.6356,
+ "step": 2424
+ },
+ {
+ "epoch": 2.425682223125254,
+ "grad_norm": 0.48909735679626465,
+ "learning_rate": 1.744366731520408e-05,
+ "loss": 0.7804,
+ "step": 2425
+ },
+ {
+ "epoch": 2.426682504454378,
+ "grad_norm": 0.4361596703529358,
+ "learning_rate": 1.7384559178830472e-05,
+ "loss": 0.6193,
+ "step": 2426
+ },
+ {
+ "epoch": 2.4276827857835017,
+ "grad_norm": 0.48339372873306274,
+ "learning_rate": 1.7325541821885384e-05,
+ "loss": 0.6213,
+ "step": 2427
+ },
+ {
+ "epoch": 2.4286830671126256,
+ "grad_norm": 0.5264155864715576,
+ "learning_rate": 1.726661530921815e-05,
+ "loss": 0.7486,
+ "step": 2428
+ },
+ {
+ "epoch": 2.429683348441749,
+ "grad_norm": 0.5361571311950684,
+ "learning_rate": 1.7207779705578375e-05,
+ "loss": 0.7603,
+ "step": 2429
+ },
+ {
+ "epoch": 2.430683629770873,
+ "grad_norm": 0.42906150221824646,
+ "learning_rate": 1.7149035075615794e-05,
+ "loss": 0.5387,
+ "step": 2430
+ },
+ {
+ "epoch": 2.431683911099997,
+ "grad_norm": 0.4638700783252716,
+ "learning_rate": 1.709038148388007e-05,
+ "loss": 0.5149,
+ "step": 2431
+ },
+ {
+ "epoch": 2.4326841924291207,
+ "grad_norm": 0.5149651765823364,
+ "learning_rate": 1.7031818994820926e-05,
+ "loss": 0.7173,
+ "step": 2432
+ },
+ {
+ "epoch": 2.4336844737582446,
+ "grad_norm": 0.48855680227279663,
+ "learning_rate": 1.697334767278792e-05,
+ "loss": 0.649,
+ "step": 2433
+ },
+ {
+ "epoch": 2.4346847550873685,
+ "grad_norm": 0.4574027955532074,
+ "learning_rate": 1.6914967582030493e-05,
+ "loss": 0.5281,
+ "step": 2434
+ },
+ {
+ "epoch": 2.4356850364164924,
+ "grad_norm": 0.49395766854286194,
+ "learning_rate": 1.6856678786697778e-05,
+ "loss": 0.5188,
+ "step": 2435
+ },
+ {
+ "epoch": 2.436685317745616,
+ "grad_norm": 0.4664051830768585,
+ "learning_rate": 1.6798481350838648e-05,
+ "loss": 0.6979,
+ "step": 2436
+ },
+ {
+ "epoch": 2.4376855990747397,
+ "grad_norm": 0.4599386751651764,
+ "learning_rate": 1.6740375338401526e-05,
+ "loss": 0.5938,
+ "step": 2437
+ },
+ {
+ "epoch": 2.4386858804038636,
+ "grad_norm": 0.45751938223838806,
+ "learning_rate": 1.6682360813234444e-05,
+ "loss": 0.5343,
+ "step": 2438
+ },
+ {
+ "epoch": 2.4396861617329875,
+ "grad_norm": 0.48478764295578003,
+ "learning_rate": 1.6624437839084862e-05,
+ "loss": 0.6195,
+ "step": 2439
+ },
+ {
+ "epoch": 2.4406864430621114,
+ "grad_norm": 0.46098843216896057,
+ "learning_rate": 1.656660647959962e-05,
+ "loss": 0.5366,
+ "step": 2440
+ },
+ {
+ "epoch": 2.441686724391235,
+ "grad_norm": 0.5450953245162964,
+ "learning_rate": 1.6508866798324986e-05,
+ "loss": 0.5466,
+ "step": 2441
+ },
+ {
+ "epoch": 2.4426870057203587,
+ "grad_norm": 0.5364235639572144,
+ "learning_rate": 1.6451218858706374e-05,
+ "loss": 0.7062,
+ "step": 2442
+ },
+ {
+ "epoch": 2.4436872870494826,
+ "grad_norm": 0.5759331583976746,
+ "learning_rate": 1.6393662724088478e-05,
+ "loss": 0.6879,
+ "step": 2443
+ },
+ {
+ "epoch": 2.4446875683786065,
+ "grad_norm": 0.5829169154167175,
+ "learning_rate": 1.633619845771501e-05,
+ "loss": 0.5463,
+ "step": 2444
+ },
+ {
+ "epoch": 2.4456878497077303,
+ "grad_norm": 0.4662203788757324,
+ "learning_rate": 1.627882612272893e-05,
+ "loss": 0.5994,
+ "step": 2445
+ },
+ {
+ "epoch": 2.4466881310368542,
+ "grad_norm": 0.5085203647613525,
+ "learning_rate": 1.622154578217199e-05,
+ "loss": 0.654,
+ "step": 2446
+ },
+ {
+ "epoch": 2.447688412365978,
+ "grad_norm": 0.4459596872329712,
+ "learning_rate": 1.6164357498984893e-05,
+ "loss": 0.4425,
+ "step": 2447
+ },
+ {
+ "epoch": 2.4486886936951016,
+ "grad_norm": 0.5450780987739563,
+ "learning_rate": 1.6107261336007285e-05,
+ "loss": 0.6722,
+ "step": 2448
+ },
+ {
+ "epoch": 2.4496889750242254,
+ "grad_norm": 0.4960186779499054,
+ "learning_rate": 1.605025735597746e-05,
+ "loss": 0.5617,
+ "step": 2449
+ },
+ {
+ "epoch": 2.4506892563533493,
+ "grad_norm": 0.49519863724708557,
+ "learning_rate": 1.599334562153254e-05,
+ "loss": 0.5659,
+ "step": 2450
+ },
+ {
+ "epoch": 2.451689537682473,
+ "grad_norm": 0.42490801215171814,
+ "learning_rate": 1.593652619520819e-05,
+ "loss": 0.5917,
+ "step": 2451
+ },
+ {
+ "epoch": 2.452689819011597,
+ "grad_norm": 0.42814430594444275,
+ "learning_rate": 1.587979913943871e-05,
+ "loss": 0.5453,
+ "step": 2452
+ },
+ {
+ "epoch": 2.453690100340721,
+ "grad_norm": 0.40873077511787415,
+ "learning_rate": 1.5823164516556842e-05,
+ "loss": 0.519,
+ "step": 2453
+ },
+ {
+ "epoch": 2.4546903816698444,
+ "grad_norm": 0.46831750869750977,
+ "learning_rate": 1.5766622388793838e-05,
+ "loss": 0.6087,
+ "step": 2454
+ },
+ {
+ "epoch": 2.4556906629989683,
+ "grad_norm": 0.47180086374282837,
+ "learning_rate": 1.5710172818279222e-05,
+ "loss": 0.5621,
+ "step": 2455
+ },
+ {
+ "epoch": 2.456690944328092,
+ "grad_norm": 0.4417397081851959,
+ "learning_rate": 1.5653815867040923e-05,
+ "loss": 0.588,
+ "step": 2456
+ },
+ {
+ "epoch": 2.457691225657216,
+ "grad_norm": 0.47052255272865295,
+ "learning_rate": 1.5597551597004966e-05,
+ "loss": 0.5726,
+ "step": 2457
+ },
+ {
+ "epoch": 2.45869150698634,
+ "grad_norm": 0.5345332622528076,
+ "learning_rate": 1.554138006999568e-05,
+ "loss": 0.7633,
+ "step": 2458
+ },
+ {
+ "epoch": 2.459691788315464,
+ "grad_norm": 0.4659261703491211,
+ "learning_rate": 1.5485301347735348e-05,
+ "loss": 0.6815,
+ "step": 2459
+ },
+ {
+ "epoch": 2.4606920696445878,
+ "grad_norm": 0.4760098159313202,
+ "learning_rate": 1.5429315491844388e-05,
+ "loss": 0.502,
+ "step": 2460
+ },
+ {
+ "epoch": 2.461692350973711,
+ "grad_norm": 0.4500744044780731,
+ "learning_rate": 1.5373422563841133e-05,
+ "loss": 0.5937,
+ "step": 2461
+ },
+ {
+ "epoch": 2.462692632302835,
+ "grad_norm": 0.4707466661930084,
+ "learning_rate": 1.531762262514177e-05,
+ "loss": 0.5163,
+ "step": 2462
+ },
+ {
+ "epoch": 2.463692913631959,
+ "grad_norm": 0.4404618740081787,
+ "learning_rate": 1.5261915737060384e-05,
+ "loss": 0.6068,
+ "step": 2463
+ },
+ {
+ "epoch": 2.464693194961083,
+ "grad_norm": 0.44543537497520447,
+ "learning_rate": 1.5206301960808722e-05,
+ "loss": 0.5419,
+ "step": 2464
+ },
+ {
+ "epoch": 2.4656934762902067,
+ "grad_norm": 0.4371756911277771,
+ "learning_rate": 1.5150781357496314e-05,
+ "loss": 0.5073,
+ "step": 2465
+ },
+ {
+ "epoch": 2.4666937576193306,
+ "grad_norm": 0.4547995328903198,
+ "learning_rate": 1.5095353988130235e-05,
+ "loss": 0.5694,
+ "step": 2466
+ },
+ {
+ "epoch": 2.467694038948454,
+ "grad_norm": 0.45245441794395447,
+ "learning_rate": 1.5040019913615123e-05,
+ "loss": 0.5318,
+ "step": 2467
+ },
+ {
+ "epoch": 2.468694320277578,
+ "grad_norm": 0.5516065359115601,
+ "learning_rate": 1.4984779194753151e-05,
+ "loss": 0.7389,
+ "step": 2468
+ },
+ {
+ "epoch": 2.469694601606702,
+ "grad_norm": 0.5618095993995667,
+ "learning_rate": 1.4929631892243856e-05,
+ "loss": 0.8326,
+ "step": 2469
+ },
+ {
+ "epoch": 2.4706948829358257,
+ "grad_norm": 0.5322582125663757,
+ "learning_rate": 1.4874578066684186e-05,
+ "loss": 0.6577,
+ "step": 2470
+ },
+ {
+ "epoch": 2.4716951642649496,
+ "grad_norm": 0.4064349830150604,
+ "learning_rate": 1.4819617778568285e-05,
+ "loss": 0.545,
+ "step": 2471
+ },
+ {
+ "epoch": 2.4726954455940735,
+ "grad_norm": 0.4223059415817261,
+ "learning_rate": 1.476475108828762e-05,
+ "loss": 0.6278,
+ "step": 2472
+ },
+ {
+ "epoch": 2.4736957269231974,
+ "grad_norm": 0.4212653934955597,
+ "learning_rate": 1.4709978056130713e-05,
+ "loss": 0.6044,
+ "step": 2473
+ },
+ {
+ "epoch": 2.474696008252321,
+ "grad_norm": 0.479046106338501,
+ "learning_rate": 1.4655298742283252e-05,
+ "loss": 0.5589,
+ "step": 2474
+ },
+ {
+ "epoch": 2.4756962895814447,
+ "grad_norm": 0.41649335622787476,
+ "learning_rate": 1.4600713206827932e-05,
+ "loss": 0.5048,
+ "step": 2475
+ },
+ {
+ "epoch": 2.4766965709105686,
+ "grad_norm": 0.41688817739486694,
+ "learning_rate": 1.454622150974434e-05,
+ "loss": 0.5926,
+ "step": 2476
+ },
+ {
+ "epoch": 2.4776968522396925,
+ "grad_norm": 0.5202938914299011,
+ "learning_rate": 1.4491823710909047e-05,
+ "loss": 0.666,
+ "step": 2477
+ },
+ {
+ "epoch": 2.4786971335688164,
+ "grad_norm": 0.5899435877799988,
+ "learning_rate": 1.4437519870095329e-05,
+ "loss": 0.6623,
+ "step": 2478
+ },
+ {
+ "epoch": 2.47969741489794,
+ "grad_norm": 0.43830054998397827,
+ "learning_rate": 1.4383310046973365e-05,
+ "loss": 0.4251,
+ "step": 2479
+ },
+ {
+ "epoch": 2.4806976962270637,
+ "grad_norm": 0.509669840335846,
+ "learning_rate": 1.4329194301109872e-05,
+ "loss": 0.6183,
+ "step": 2480
+ },
+ {
+ "epoch": 2.4816979775561876,
+ "grad_norm": 0.5677187442779541,
+ "learning_rate": 1.427517269196833e-05,
+ "loss": 0.5949,
+ "step": 2481
+ },
+ {
+ "epoch": 2.4826982588853115,
+ "grad_norm": 0.4813043773174286,
+ "learning_rate": 1.4221245278908668e-05,
+ "loss": 0.6929,
+ "step": 2482
+ },
+ {
+ "epoch": 2.4836985402144354,
+ "grad_norm": 0.4711589217185974,
+ "learning_rate": 1.4167412121187406e-05,
+ "loss": 0.6191,
+ "step": 2483
+ },
+ {
+ "epoch": 2.4846988215435593,
+ "grad_norm": 0.4879576861858368,
+ "learning_rate": 1.4113673277957395e-05,
+ "loss": 0.5817,
+ "step": 2484
+ },
+ {
+ "epoch": 2.485699102872683,
+ "grad_norm": 0.5340747833251953,
+ "learning_rate": 1.4060028808267967e-05,
+ "loss": 0.6396,
+ "step": 2485
+ },
+ {
+ "epoch": 2.4866993842018066,
+ "grad_norm": 0.655983567237854,
+ "learning_rate": 1.4006478771064646e-05,
+ "loss": 0.6395,
+ "step": 2486
+ },
+ {
+ "epoch": 2.4876996655309305,
+ "grad_norm": 0.44405293464660645,
+ "learning_rate": 1.3953023225189243e-05,
+ "loss": 0.5643,
+ "step": 2487
+ },
+ {
+ "epoch": 2.4886999468600544,
+ "grad_norm": 0.4928829073905945,
+ "learning_rate": 1.389966222937974e-05,
+ "loss": 0.5402,
+ "step": 2488
+ },
+ {
+ "epoch": 2.4897002281891782,
+ "grad_norm": 0.3877166509628296,
+ "learning_rate": 1.3846395842270232e-05,
+ "loss": 0.5906,
+ "step": 2489
+ },
+ {
+ "epoch": 2.490700509518302,
+ "grad_norm": 0.5130916237831116,
+ "learning_rate": 1.3793224122390858e-05,
+ "loss": 0.711,
+ "step": 2490
+ },
+ {
+ "epoch": 2.491700790847426,
+ "grad_norm": 0.5104362368583679,
+ "learning_rate": 1.374014712816768e-05,
+ "loss": 0.634,
+ "step": 2491
+ },
+ {
+ "epoch": 2.4927010721765495,
+ "grad_norm": 0.5256757736206055,
+ "learning_rate": 1.3687164917922768e-05,
+ "loss": 0.7387,
+ "step": 2492
+ },
+ {
+ "epoch": 2.4937013535056733,
+ "grad_norm": 0.5370634198188782,
+ "learning_rate": 1.3634277549873953e-05,
+ "loss": 0.6866,
+ "step": 2493
+ },
+ {
+ "epoch": 2.4947016348347972,
+ "grad_norm": 0.45995843410491943,
+ "learning_rate": 1.3581485082134882e-05,
+ "loss": 0.4967,
+ "step": 2494
+ },
+ {
+ "epoch": 2.495701916163921,
+ "grad_norm": 0.5517768263816833,
+ "learning_rate": 1.3528787572714952e-05,
+ "loss": 0.6128,
+ "step": 2495
+ },
+ {
+ "epoch": 2.496702197493045,
+ "grad_norm": 0.45371124148368835,
+ "learning_rate": 1.3476185079519177e-05,
+ "loss": 0.6992,
+ "step": 2496
+ },
+ {
+ "epoch": 2.497702478822169,
+ "grad_norm": 0.5358415842056274,
+ "learning_rate": 1.342367766034821e-05,
+ "loss": 0.6828,
+ "step": 2497
+ },
+ {
+ "epoch": 2.498702760151293,
+ "grad_norm": 0.5540277361869812,
+ "learning_rate": 1.3371265372898167e-05,
+ "loss": 0.7249,
+ "step": 2498
+ },
+ {
+ "epoch": 2.499703041480416,
+ "grad_norm": 0.4393683671951294,
+ "learning_rate": 1.3318948274760734e-05,
+ "loss": 0.5943,
+ "step": 2499
+ },
+ {
+ "epoch": 2.50070332280954,
+ "grad_norm": 0.4958156645298004,
+ "learning_rate": 1.326672642342287e-05,
+ "loss": 0.6252,
+ "step": 2500
+ },
+ {
+ "epoch": 2.501703604138664,
+ "grad_norm": 0.4665940999984741,
+ "learning_rate": 1.3214599876266998e-05,
+ "loss": 0.6677,
+ "step": 2501
+ },
+ {
+ "epoch": 2.502703885467788,
+ "grad_norm": 0.49584901332855225,
+ "learning_rate": 1.3162568690570743e-05,
+ "loss": 0.6062,
+ "step": 2502
+ },
+ {
+ "epoch": 2.5037041667969118,
+ "grad_norm": 0.602810263633728,
+ "learning_rate": 1.311063292350696e-05,
+ "loss": 0.7821,
+ "step": 2503
+ },
+ {
+ "epoch": 2.504704448126035,
+ "grad_norm": 0.5082575678825378,
+ "learning_rate": 1.3058792632143701e-05,
+ "loss": 0.6479,
+ "step": 2504
+ },
+ {
+ "epoch": 2.505704729455159,
+ "grad_norm": 0.5330275893211365,
+ "learning_rate": 1.3007047873444034e-05,
+ "loss": 0.5603,
+ "step": 2505
+ },
+ {
+ "epoch": 2.506705010784283,
+ "grad_norm": 0.48020118474960327,
+ "learning_rate": 1.295539870426612e-05,
+ "loss": 0.6407,
+ "step": 2506
+ },
+ {
+ "epoch": 2.507705292113407,
+ "grad_norm": 0.509580135345459,
+ "learning_rate": 1.2903845181363017e-05,
+ "loss": 0.5958,
+ "step": 2507
+ },
+ {
+ "epoch": 2.5087055734425308,
+ "grad_norm": 0.5482074022293091,
+ "learning_rate": 1.2852387361382767e-05,
+ "loss": 0.7589,
+ "step": 2508
+ },
+ {
+ "epoch": 2.5097058547716546,
+ "grad_norm": 0.5284531712532043,
+ "learning_rate": 1.2801025300868163e-05,
+ "loss": 0.5623,
+ "step": 2509
+ },
+ {
+ "epoch": 2.5107061361007785,
+ "grad_norm": 0.47718411684036255,
+ "learning_rate": 1.2749759056256883e-05,
+ "loss": 0.6324,
+ "step": 2510
+ },
+ {
+ "epoch": 2.5117064174299024,
+ "grad_norm": 0.5444175004959106,
+ "learning_rate": 1.2698588683881186e-05,
+ "loss": 0.6239,
+ "step": 2511
+ },
+ {
+ "epoch": 2.512706698759026,
+ "grad_norm": 0.49655935168266296,
+ "learning_rate": 1.2647514239968105e-05,
+ "loss": 0.6174,
+ "step": 2512
+ },
+ {
+ "epoch": 2.5137069800881497,
+ "grad_norm": 0.49785614013671875,
+ "learning_rate": 1.2596535780639218e-05,
+ "loss": 0.5825,
+ "step": 2513
+ },
+ {
+ "epoch": 2.5147072614172736,
+ "grad_norm": 0.4897768795490265,
+ "learning_rate": 1.2545653361910592e-05,
+ "loss": 0.5733,
+ "step": 2514
+ },
+ {
+ "epoch": 2.5157075427463975,
+ "grad_norm": 0.7728829383850098,
+ "learning_rate": 1.2494867039692848e-05,
+ "loss": 0.6317,
+ "step": 2515
+ },
+ {
+ "epoch": 2.5167078240755214,
+ "grad_norm": 0.4777291417121887,
+ "learning_rate": 1.2444176869790925e-05,
+ "loss": 0.6592,
+ "step": 2516
+ },
+ {
+ "epoch": 2.517708105404645,
+ "grad_norm": 0.49749183654785156,
+ "learning_rate": 1.23935829079042e-05,
+ "loss": 0.5385,
+ "step": 2517
+ },
+ {
+ "epoch": 2.5187083867337687,
+ "grad_norm": 0.5671753883361816,
+ "learning_rate": 1.2343085209626225e-05,
+ "loss": 0.5705,
+ "step": 2518
+ },
+ {
+ "epoch": 2.5197086680628926,
+ "grad_norm": 0.47141382098197937,
+ "learning_rate": 1.2292683830444919e-05,
+ "loss": 0.5273,
+ "step": 2519
+ },
+ {
+ "epoch": 2.5207089493920165,
+ "grad_norm": 0.5097513198852539,
+ "learning_rate": 1.224237882574224e-05,
+ "loss": 0.5135,
+ "step": 2520
+ },
+ {
+ "epoch": 2.5217092307211404,
+ "grad_norm": 0.4620420038700104,
+ "learning_rate": 1.2192170250794277e-05,
+ "loss": 0.5822,
+ "step": 2521
+ },
+ {
+ "epoch": 2.5227095120502643,
+ "grad_norm": 0.4511956572532654,
+ "learning_rate": 1.2142058160771207e-05,
+ "loss": 0.6099,
+ "step": 2522
+ },
+ {
+ "epoch": 2.523709793379388,
+ "grad_norm": 0.4892938733100891,
+ "learning_rate": 1.2092042610737108e-05,
+ "loss": 0.6966,
+ "step": 2523
+ },
+ {
+ "epoch": 2.5247100747085116,
+ "grad_norm": 0.5775454044342041,
+ "learning_rate": 1.2042123655650084e-05,
+ "loss": 0.6882,
+ "step": 2524
+ },
+ {
+ "epoch": 2.5257103560376355,
+ "grad_norm": 0.514741063117981,
+ "learning_rate": 1.1992301350361978e-05,
+ "loss": 0.5352,
+ "step": 2525
+ },
+ {
+ "epoch": 2.5267106373667594,
+ "grad_norm": 0.5064074397087097,
+ "learning_rate": 1.1942575749618546e-05,
+ "loss": 0.635,
+ "step": 2526
+ },
+ {
+ "epoch": 2.5277109186958833,
+ "grad_norm": 0.44427722692489624,
+ "learning_rate": 1.1892946908059188e-05,
+ "loss": 0.5949,
+ "step": 2527
+ },
+ {
+ "epoch": 2.528711200025007,
+ "grad_norm": 0.45052123069763184,
+ "learning_rate": 1.1843414880217051e-05,
+ "loss": 0.4692,
+ "step": 2528
+ },
+ {
+ "epoch": 2.5297114813541306,
+ "grad_norm": 0.4366517663002014,
+ "learning_rate": 1.1793979720518866e-05,
+ "loss": 0.5817,
+ "step": 2529
+ },
+ {
+ "epoch": 2.5307117626832545,
+ "grad_norm": 0.4776402711868286,
+ "learning_rate": 1.1744641483284946e-05,
+ "loss": 0.536,
+ "step": 2530
+ },
+ {
+ "epoch": 2.5317120440123784,
+ "grad_norm": 0.422873318195343,
+ "learning_rate": 1.1695400222729058e-05,
+ "loss": 0.5172,
+ "step": 2531
+ },
+ {
+ "epoch": 2.5327123253415023,
+ "grad_norm": 0.5048778653144836,
+ "learning_rate": 1.1646255992958466e-05,
+ "loss": 0.6095,
+ "step": 2532
+ },
+ {
+ "epoch": 2.533712606670626,
+ "grad_norm": 0.5859957933425903,
+ "learning_rate": 1.1597208847973818e-05,
+ "loss": 0.5288,
+ "step": 2533
+ },
+ {
+ "epoch": 2.53471288799975,
+ "grad_norm": 0.4474251866340637,
+ "learning_rate": 1.1548258841669002e-05,
+ "loss": 0.6044,
+ "step": 2534
+ },
+ {
+ "epoch": 2.535713169328874,
+ "grad_norm": 0.5231831669807434,
+ "learning_rate": 1.1499406027831262e-05,
+ "loss": 0.5941,
+ "step": 2535
+ },
+ {
+ "epoch": 2.536713450657998,
+ "grad_norm": 0.4984927773475647,
+ "learning_rate": 1.1450650460140977e-05,
+ "loss": 0.6244,
+ "step": 2536
+ },
+ {
+ "epoch": 2.5377137319871212,
+ "grad_norm": 0.46725359559059143,
+ "learning_rate": 1.140199219217174e-05,
+ "loss": 0.5356,
+ "step": 2537
+ },
+ {
+ "epoch": 2.538714013316245,
+ "grad_norm": 0.492912620306015,
+ "learning_rate": 1.1353431277390126e-05,
+ "loss": 0.634,
+ "step": 2538
+ },
+ {
+ "epoch": 2.539714294645369,
+ "grad_norm": 0.560057520866394,
+ "learning_rate": 1.1304967769155861e-05,
+ "loss": 0.639,
+ "step": 2539
+ },
+ {
+ "epoch": 2.540714575974493,
+ "grad_norm": 0.5444757342338562,
+ "learning_rate": 1.1256601720721549e-05,
+ "loss": 0.6101,
+ "step": 2540
+ },
+ {
+ "epoch": 2.541714857303617,
+ "grad_norm": 0.45627185702323914,
+ "learning_rate": 1.1208333185232712e-05,
+ "loss": 0.5345,
+ "step": 2541
+ },
+ {
+ "epoch": 2.5427151386327402,
+ "grad_norm": 0.5010327696800232,
+ "learning_rate": 1.1160162215727765e-05,
+ "loss": 0.6542,
+ "step": 2542
+ },
+ {
+ "epoch": 2.543715419961864,
+ "grad_norm": 0.5232365727424622,
+ "learning_rate": 1.111208886513787e-05,
+ "loss": 0.6409,
+ "step": 2543
+ },
+ {
+ "epoch": 2.544715701290988,
+ "grad_norm": 1.0535163879394531,
+ "learning_rate": 1.1064113186286984e-05,
+ "loss": 0.771,
+ "step": 2544
+ },
+ {
+ "epoch": 2.545715982620112,
+ "grad_norm": 0.5562714338302612,
+ "learning_rate": 1.1016235231891658e-05,
+ "loss": 0.5875,
+ "step": 2545
+ },
+ {
+ "epoch": 2.546716263949236,
+ "grad_norm": 0.5882842540740967,
+ "learning_rate": 1.0968455054561145e-05,
+ "loss": 0.7083,
+ "step": 2546
+ },
+ {
+ "epoch": 2.5477165452783597,
+ "grad_norm": 0.5215815901756287,
+ "learning_rate": 1.0920772706797167e-05,
+ "loss": 0.6468,
+ "step": 2547
+ },
+ {
+ "epoch": 2.5487168266074836,
+ "grad_norm": 0.48281216621398926,
+ "learning_rate": 1.0873188240994048e-05,
+ "loss": 0.525,
+ "step": 2548
+ },
+ {
+ "epoch": 2.5497171079366074,
+ "grad_norm": 0.4125751852989197,
+ "learning_rate": 1.0825701709438508e-05,
+ "loss": 0.5541,
+ "step": 2549
+ },
+ {
+ "epoch": 2.550717389265731,
+ "grad_norm": 0.49239426851272583,
+ "learning_rate": 1.0778313164309616e-05,
+ "loss": 0.6849,
+ "step": 2550
+ },
+ {
+ "epoch": 2.5517176705948548,
+ "grad_norm": 0.520441472530365,
+ "learning_rate": 1.0731022657678869e-05,
+ "loss": 0.6564,
+ "step": 2551
+ },
+ {
+ "epoch": 2.5527179519239787,
+ "grad_norm": 0.4203236699104309,
+ "learning_rate": 1.068383024150994e-05,
+ "loss": 0.514,
+ "step": 2552
+ },
+ {
+ "epoch": 2.5537182332531025,
+ "grad_norm": 0.5112154483795166,
+ "learning_rate": 1.0636735967658784e-05,
+ "loss": 0.5987,
+ "step": 2553
+ },
+ {
+ "epoch": 2.554718514582226,
+ "grad_norm": 0.5111169219017029,
+ "learning_rate": 1.0589739887873462e-05,
+ "loss": 0.6117,
+ "step": 2554
+ },
+ {
+ "epoch": 2.55571879591135,
+ "grad_norm": 0.5053681135177612,
+ "learning_rate": 1.05428420537942e-05,
+ "loss": 0.5615,
+ "step": 2555
+ },
+ {
+ "epoch": 2.5567190772404738,
+ "grad_norm": 0.5257356762886047,
+ "learning_rate": 1.0496042516953209e-05,
+ "loss": 0.8089,
+ "step": 2556
+ },
+ {
+ "epoch": 2.5577193585695976,
+ "grad_norm": 0.6174327731132507,
+ "learning_rate": 1.0449341328774743e-05,
+ "loss": 0.5099,
+ "step": 2557
+ },
+ {
+ "epoch": 2.5587196398987215,
+ "grad_norm": 0.4347025752067566,
+ "learning_rate": 1.040273854057493e-05,
+ "loss": 0.5404,
+ "step": 2558
+ },
+ {
+ "epoch": 2.5597199212278454,
+ "grad_norm": 0.5227190256118774,
+ "learning_rate": 1.0356234203561832e-05,
+ "loss": 0.6956,
+ "step": 2559
+ },
+ {
+ "epoch": 2.5607202025569693,
+ "grad_norm": 0.4151594936847687,
+ "learning_rate": 1.0309828368835305e-05,
+ "loss": 0.5877,
+ "step": 2560
+ },
+ {
+ "epoch": 2.561720483886093,
+ "grad_norm": 0.5167533159255981,
+ "learning_rate": 1.026352108738694e-05,
+ "loss": 0.6133,
+ "step": 2561
+ },
+ {
+ "epoch": 2.5627207652152166,
+ "grad_norm": 0.5157315135002136,
+ "learning_rate": 1.0217312410100089e-05,
+ "loss": 0.6234,
+ "step": 2562
+ },
+ {
+ "epoch": 2.5637210465443405,
+ "grad_norm": 0.4833580553531647,
+ "learning_rate": 1.0171202387749724e-05,
+ "loss": 0.6334,
+ "step": 2563
+ },
+ {
+ "epoch": 2.5647213278734644,
+ "grad_norm": 0.5104491710662842,
+ "learning_rate": 1.0125191071002438e-05,
+ "loss": 0.6461,
+ "step": 2564
+ },
+ {
+ "epoch": 2.5657216092025883,
+ "grad_norm": 0.4097834825515747,
+ "learning_rate": 1.0079278510416313e-05,
+ "loss": 0.5277,
+ "step": 2565
+ },
+ {
+ "epoch": 2.566721890531712,
+ "grad_norm": 0.499045729637146,
+ "learning_rate": 1.0033464756440991e-05,
+ "loss": 0.6554,
+ "step": 2566
+ },
+ {
+ "epoch": 2.5677221718608356,
+ "grad_norm": 0.4658253490924835,
+ "learning_rate": 9.987749859417484e-06,
+ "loss": 0.5555,
+ "step": 2567
+ },
+ {
+ "epoch": 2.5687224531899595,
+ "grad_norm": 0.5237826108932495,
+ "learning_rate": 9.942133869578164e-06,
+ "loss": 0.6928,
+ "step": 2568
+ },
+ {
+ "epoch": 2.5697227345190834,
+ "grad_norm": 0.4166854918003082,
+ "learning_rate": 9.896616837046813e-06,
+ "loss": 0.6416,
+ "step": 2569
+ },
+ {
+ "epoch": 2.5707230158482073,
+ "grad_norm": 0.5902143716812134,
+ "learning_rate": 9.851198811838368e-06,
+ "loss": 0.7564,
+ "step": 2570
+ },
+ {
+ "epoch": 2.571723297177331,
+ "grad_norm": 0.45861905813217163,
+ "learning_rate": 9.805879843859057e-06,
+ "loss": 0.5297,
+ "step": 2571
+ },
+ {
+ "epoch": 2.572723578506455,
+ "grad_norm": 0.40770262479782104,
+ "learning_rate": 9.760659982906206e-06,
+ "loss": 0.5264,
+ "step": 2572
+ },
+ {
+ "epoch": 2.573723859835579,
+ "grad_norm": 0.49685248732566833,
+ "learning_rate": 9.715539278668284e-06,
+ "loss": 0.5631,
+ "step": 2573
+ },
+ {
+ "epoch": 2.574724141164703,
+ "grad_norm": 0.5326783657073975,
+ "learning_rate": 9.670517780724775e-06,
+ "loss": 0.6279,
+ "step": 2574
+ },
+ {
+ "epoch": 2.5757244224938263,
+ "grad_norm": 0.5041221976280212,
+ "learning_rate": 9.625595538546173e-06,
+ "loss": 0.6835,
+ "step": 2575
+ },
+ {
+ "epoch": 2.57672470382295,
+ "grad_norm": 0.48268288373947144,
+ "learning_rate": 9.580772601493871e-06,
+ "loss": 0.5618,
+ "step": 2576
+ },
+ {
+ "epoch": 2.577724985152074,
+ "grad_norm": 0.4918215572834015,
+ "learning_rate": 9.536049018820192e-06,
+ "loss": 0.6491,
+ "step": 2577
+ },
+ {
+ "epoch": 2.578725266481198,
+ "grad_norm": 0.48889824748039246,
+ "learning_rate": 9.491424839668273e-06,
+ "loss": 0.7428,
+ "step": 2578
+ },
+ {
+ "epoch": 2.579725547810322,
+ "grad_norm": 0.5224034786224365,
+ "learning_rate": 9.446900113072e-06,
+ "loss": 0.5438,
+ "step": 2579
+ },
+ {
+ "epoch": 2.5807258291394453,
+ "grad_norm": 0.5095034241676331,
+ "learning_rate": 9.402474887956015e-06,
+ "loss": 0.6882,
+ "step": 2580
+ },
+ {
+ "epoch": 2.581726110468569,
+ "grad_norm": 0.4882202744483948,
+ "learning_rate": 9.35814921313557e-06,
+ "loss": 0.6531,
+ "step": 2581
+ },
+ {
+ "epoch": 2.582726391797693,
+ "grad_norm": 0.4663775861263275,
+ "learning_rate": 9.313923137316616e-06,
+ "loss": 0.6819,
+ "step": 2582
+ },
+ {
+ "epoch": 2.583726673126817,
+ "grad_norm": 0.5433738231658936,
+ "learning_rate": 9.269796709095558e-06,
+ "loss": 0.5615,
+ "step": 2583
+ },
+ {
+ "epoch": 2.584726954455941,
+ "grad_norm": 0.6087539792060852,
+ "learning_rate": 9.225769976959408e-06,
+ "loss": 0.6036,
+ "step": 2584
+ },
+ {
+ "epoch": 2.5857272357850647,
+ "grad_norm": 0.5084554553031921,
+ "learning_rate": 9.18184298928556e-06,
+ "loss": 0.6909,
+ "step": 2585
+ },
+ {
+ "epoch": 2.5867275171141886,
+ "grad_norm": 0.5152381658554077,
+ "learning_rate": 9.138015794341815e-06,
+ "loss": 0.7595,
+ "step": 2586
+ },
+ {
+ "epoch": 2.587727798443312,
+ "grad_norm": 0.5154881477355957,
+ "learning_rate": 9.09428844028637e-06,
+ "loss": 0.6029,
+ "step": 2587
+ },
+ {
+ "epoch": 2.588728079772436,
+ "grad_norm": 0.6392176151275635,
+ "learning_rate": 9.050660975167635e-06,
+ "loss": 0.669,
+ "step": 2588
+ },
+ {
+ "epoch": 2.58972836110156,
+ "grad_norm": 0.49754637479782104,
+ "learning_rate": 9.007133446924343e-06,
+ "loss": 0.5979,
+ "step": 2589
+ },
+ {
+ "epoch": 2.5907286424306837,
+ "grad_norm": 0.4721156060695648,
+ "learning_rate": 8.963705903385345e-06,
+ "loss": 0.6168,
+ "step": 2590
+ },
+ {
+ "epoch": 2.5917289237598076,
+ "grad_norm": 0.45940566062927246,
+ "learning_rate": 8.92037839226969e-06,
+ "loss": 0.5187,
+ "step": 2591
+ },
+ {
+ "epoch": 2.592729205088931,
+ "grad_norm": 0.5272762775421143,
+ "learning_rate": 8.87715096118642e-06,
+ "loss": 0.6327,
+ "step": 2592
+ },
+ {
+ "epoch": 2.593729486418055,
+ "grad_norm": 0.6224849820137024,
+ "learning_rate": 8.834023657634739e-06,
+ "loss": 0.573,
+ "step": 2593
+ },
+ {
+ "epoch": 2.594729767747179,
+ "grad_norm": 0.47467121481895447,
+ "learning_rate": 8.790996529003726e-06,
+ "loss": 0.5926,
+ "step": 2594
+ },
+ {
+ "epoch": 2.5957300490763027,
+ "grad_norm": 0.46342840790748596,
+ "learning_rate": 8.748069622572386e-06,
+ "loss": 0.5939,
+ "step": 2595
+ },
+ {
+ "epoch": 2.5967303304054266,
+ "grad_norm": 0.46991854906082153,
+ "learning_rate": 8.705242985509665e-06,
+ "loss": 0.645,
+ "step": 2596
+ },
+ {
+ "epoch": 2.5977306117345504,
+ "grad_norm": 0.4724017381668091,
+ "learning_rate": 8.662516664874254e-06,
+ "loss": 0.6146,
+ "step": 2597
+ },
+ {
+ "epoch": 2.5987308930636743,
+ "grad_norm": 0.5314664840698242,
+ "learning_rate": 8.619890707614686e-06,
+ "loss": 0.624,
+ "step": 2598
+ },
+ {
+ "epoch": 2.599731174392798,
+ "grad_norm": 0.4588354229927063,
+ "learning_rate": 8.57736516056915e-06,
+ "loss": 0.6522,
+ "step": 2599
+ },
+ {
+ "epoch": 2.6007314557219217,
+ "grad_norm": 0.5070285797119141,
+ "learning_rate": 8.534940070465568e-06,
+ "loss": 0.6735,
+ "step": 2600
+ },
+ {
+ "epoch": 2.6017317370510455,
+ "grad_norm": 0.4644957184791565,
+ "learning_rate": 8.492615483921395e-06,
+ "loss": 0.579,
+ "step": 2601
+ },
+ {
+ "epoch": 2.6027320183801694,
+ "grad_norm": 0.5481740236282349,
+ "learning_rate": 8.45039144744374e-06,
+ "loss": 0.7007,
+ "step": 2602
+ },
+ {
+ "epoch": 2.6037322997092933,
+ "grad_norm": 0.546405553817749,
+ "learning_rate": 8.408268007429154e-06,
+ "loss": 0.5788,
+ "step": 2603
+ },
+ {
+ "epoch": 2.604732581038417,
+ "grad_norm": 0.5205674767494202,
+ "learning_rate": 8.366245210163703e-06,
+ "loss": 0.6106,
+ "step": 2604
+ },
+ {
+ "epoch": 2.6057328623675406,
+ "grad_norm": 0.4366490840911865,
+ "learning_rate": 8.324323101822829e-06,
+ "loss": 0.4587,
+ "step": 2605
+ },
+ {
+ "epoch": 2.6067331436966645,
+ "grad_norm": 0.5368449091911316,
+ "learning_rate": 8.282501728471315e-06,
+ "loss": 0.6456,
+ "step": 2606
+ },
+ {
+ "epoch": 2.6077334250257884,
+ "grad_norm": 0.4531463384628296,
+ "learning_rate": 8.240781136063346e-06,
+ "loss": 0.5619,
+ "step": 2607
+ },
+ {
+ "epoch": 2.6087337063549123,
+ "grad_norm": 0.4811895787715912,
+ "learning_rate": 8.199161370442276e-06,
+ "loss": 0.631,
+ "step": 2608
+ },
+ {
+ "epoch": 2.609733987684036,
+ "grad_norm": 0.4792177081108093,
+ "learning_rate": 8.15764247734071e-06,
+ "loss": 0.5037,
+ "step": 2609
+ },
+ {
+ "epoch": 2.61073426901316,
+ "grad_norm": 0.42504701018333435,
+ "learning_rate": 8.116224502380387e-06,
+ "loss": 0.4708,
+ "step": 2610
+ },
+ {
+ "epoch": 2.611734550342284,
+ "grad_norm": 0.49016115069389343,
+ "learning_rate": 8.074907491072203e-06,
+ "loss": 0.543,
+ "step": 2611
+ },
+ {
+ "epoch": 2.612734831671408,
+ "grad_norm": 0.4593164920806885,
+ "learning_rate": 8.033691488816054e-06,
+ "loss": 0.5459,
+ "step": 2612
+ },
+ {
+ "epoch": 2.6137351130005313,
+ "grad_norm": 0.4173170328140259,
+ "learning_rate": 7.992576540900876e-06,
+ "loss": 0.5879,
+ "step": 2613
+ },
+ {
+ "epoch": 2.614735394329655,
+ "grad_norm": 0.4295860826969147,
+ "learning_rate": 7.951562692504566e-06,
+ "loss": 0.5277,
+ "step": 2614
+ },
+ {
+ "epoch": 2.615735675658779,
+ "grad_norm": 0.47074851393699646,
+ "learning_rate": 7.910649988693908e-06,
+ "loss": 0.6649,
+ "step": 2615
+ },
+ {
+ "epoch": 2.616735956987903,
+ "grad_norm": 0.5880094170570374,
+ "learning_rate": 7.869838474424607e-06,
+ "loss": 0.6774,
+ "step": 2616
+ },
+ {
+ "epoch": 2.6177362383170264,
+ "grad_norm": 0.46067488193511963,
+ "learning_rate": 7.82912819454109e-06,
+ "loss": 0.688,
+ "step": 2617
+ },
+ {
+ "epoch": 2.6187365196461503,
+ "grad_norm": 0.4428972601890564,
+ "learning_rate": 7.788519193776634e-06,
+ "loss": 0.6106,
+ "step": 2618
+ },
+ {
+ "epoch": 2.619736800975274,
+ "grad_norm": 0.46289440989494324,
+ "learning_rate": 7.74801151675314e-06,
+ "loss": 0.5249,
+ "step": 2619
+ },
+ {
+ "epoch": 2.620737082304398,
+ "grad_norm": 0.5188909769058228,
+ "learning_rate": 7.707605207981262e-06,
+ "loss": 0.5232,
+ "step": 2620
+ },
+ {
+ "epoch": 2.621737363633522,
+ "grad_norm": 0.5134142637252808,
+ "learning_rate": 7.667300311860193e-06,
+ "loss": 0.5621,
+ "step": 2621
+ },
+ {
+ "epoch": 2.622737644962646,
+ "grad_norm": 0.5168859362602234,
+ "learning_rate": 7.6270968726777414e-06,
+ "loss": 0.5812,
+ "step": 2622
+ },
+ {
+ "epoch": 2.6237379262917697,
+ "grad_norm": 0.547895073890686,
+ "learning_rate": 7.586994934610225e-06,
+ "loss": 0.5649,
+ "step": 2623
+ },
+ {
+ "epoch": 2.6247382076208936,
+ "grad_norm": 0.5019206404685974,
+ "learning_rate": 7.546994541722408e-06,
+ "loss": 0.6145,
+ "step": 2624
+ },
+ {
+ "epoch": 2.625738488950017,
+ "grad_norm": 0.5064947605133057,
+ "learning_rate": 7.507095737967495e-06,
+ "loss": 0.5313,
+ "step": 2625
+ },
+ {
+ "epoch": 2.626738770279141,
+ "grad_norm": 0.44344326853752136,
+ "learning_rate": 7.467298567187042e-06,
+ "loss": 0.6279,
+ "step": 2626
+ },
+ {
+ "epoch": 2.627739051608265,
+ "grad_norm": 0.43664419651031494,
+ "learning_rate": 7.427603073110967e-06,
+ "loss": 0.5739,
+ "step": 2627
+ },
+ {
+ "epoch": 2.6287393329373887,
+ "grad_norm": 0.43493202328681946,
+ "learning_rate": 7.3880092993574125e-06,
+ "loss": 0.5842,
+ "step": 2628
+ },
+ {
+ "epoch": 2.6297396142665126,
+ "grad_norm": 0.4212566614151001,
+ "learning_rate": 7.3485172894327995e-06,
+ "loss": 0.5263,
+ "step": 2629
+ },
+ {
+ "epoch": 2.630739895595636,
+ "grad_norm": 0.5687553882598877,
+ "learning_rate": 7.309127086731671e-06,
+ "loss": 0.5821,
+ "step": 2630
+ },
+ {
+ "epoch": 2.63174017692476,
+ "grad_norm": 0.49854451417922974,
+ "learning_rate": 7.269838734536771e-06,
+ "loss": 0.6423,
+ "step": 2631
+ },
+ {
+ "epoch": 2.632740458253884,
+ "grad_norm": 0.550553023815155,
+ "learning_rate": 7.23065227601889e-06,
+ "loss": 0.698,
+ "step": 2632
+ },
+ {
+ "epoch": 2.6337407395830077,
+ "grad_norm": 0.5211242437362671,
+ "learning_rate": 7.1915677542368275e-06,
+ "loss": 0.5771,
+ "step": 2633
+ },
+ {
+ "epoch": 2.6347410209121316,
+ "grad_norm": 0.4371758997440338,
+ "learning_rate": 7.152585212137441e-06,
+ "loss": 0.4751,
+ "step": 2634
+ },
+ {
+ "epoch": 2.6357413022412555,
+ "grad_norm": 0.4474377930164337,
+ "learning_rate": 7.113704692555467e-06,
+ "loss": 0.5445,
+ "step": 2635
+ },
+ {
+ "epoch": 2.6367415835703794,
+ "grad_norm": 0.46216362714767456,
+ "learning_rate": 7.0749262382135754e-06,
+ "loss": 0.5238,
+ "step": 2636
+ },
+ {
+ "epoch": 2.6377418648995032,
+ "grad_norm": 0.46242502331733704,
+ "learning_rate": 7.03624989172228e-06,
+ "loss": 0.5582,
+ "step": 2637
+ },
+ {
+ "epoch": 2.6387421462286267,
+ "grad_norm": 0.5101394653320312,
+ "learning_rate": 6.997675695579897e-06,
+ "loss": 0.5347,
+ "step": 2638
+ },
+ {
+ "epoch": 2.6397424275577506,
+ "grad_norm": 0.5134918093681335,
+ "learning_rate": 6.9592036921724894e-06,
+ "loss": 0.6227,
+ "step": 2639
+ },
+ {
+ "epoch": 2.6407427088868745,
+ "grad_norm": 0.4822455048561096,
+ "learning_rate": 6.920833923773795e-06,
+ "loss": 0.6462,
+ "step": 2640
+ },
+ {
+ "epoch": 2.6417429902159983,
+ "grad_norm": 0.45851707458496094,
+ "learning_rate": 6.882566432545301e-06,
+ "loss": 0.5912,
+ "step": 2641
+ },
+ {
+ "epoch": 2.6427432715451222,
+ "grad_norm": 0.5093351602554321,
+ "learning_rate": 6.844401260536026e-06,
+ "loss": 0.6531,
+ "step": 2642
+ },
+ {
+ "epoch": 2.6437435528742457,
+ "grad_norm": 0.47607871890068054,
+ "learning_rate": 6.806338449682614e-06,
+ "loss": 0.5018,
+ "step": 2643
+ },
+ {
+ "epoch": 2.6447438342033696,
+ "grad_norm": 0.4657599925994873,
+ "learning_rate": 6.768378041809187e-06,
+ "loss": 0.5503,
+ "step": 2644
+ },
+ {
+ "epoch": 2.6457441155324934,
+ "grad_norm": 0.5305604934692383,
+ "learning_rate": 6.7305200786274e-06,
+ "loss": 0.6492,
+ "step": 2645
+ },
+ {
+ "epoch": 2.6467443968616173,
+ "grad_norm": 0.5009193420410156,
+ "learning_rate": 6.692764601736268e-06,
+ "loss": 0.679,
+ "step": 2646
+ },
+ {
+ "epoch": 2.647744678190741,
+ "grad_norm": 0.4341311454772949,
+ "learning_rate": 6.65511165262227e-06,
+ "loss": 0.5792,
+ "step": 2647
+ },
+ {
+ "epoch": 2.648744959519865,
+ "grad_norm": 0.45918843150138855,
+ "learning_rate": 6.617561272659156e-06,
+ "loss": 0.6714,
+ "step": 2648
+ },
+ {
+ "epoch": 2.649745240848989,
+ "grad_norm": 0.5379105806350708,
+ "learning_rate": 6.580113503108032e-06,
+ "loss": 0.6407,
+ "step": 2649
+ },
+ {
+ "epoch": 2.6507455221781124,
+ "grad_norm": 0.4882265627384186,
+ "learning_rate": 6.542768385117193e-06,
+ "loss": 0.6109,
+ "step": 2650
+ },
+ {
+ "epoch": 2.6517458035072363,
+ "grad_norm": 0.4729757010936737,
+ "learning_rate": 6.505525959722181e-06,
+ "loss": 0.6844,
+ "step": 2651
+ },
+ {
+ "epoch": 2.65274608483636,
+ "grad_norm": 0.4830170273780823,
+ "learning_rate": 6.468386267845717e-06,
+ "loss": 0.5381,
+ "step": 2652
+ },
+ {
+ "epoch": 2.653746366165484,
+ "grad_norm": 0.45024967193603516,
+ "learning_rate": 6.431349350297555e-06,
+ "loss": 0.5851,
+ "step": 2653
+ },
+ {
+ "epoch": 2.654746647494608,
+ "grad_norm": 0.5587695837020874,
+ "learning_rate": 6.394415247774621e-06,
+ "loss": 0.6963,
+ "step": 2654
+ },
+ {
+ "epoch": 2.6557469288237314,
+ "grad_norm": 0.49297550320625305,
+ "learning_rate": 6.357584000860761e-06,
+ "loss": 0.7176,
+ "step": 2655
+ },
+ {
+ "epoch": 2.6567472101528553,
+ "grad_norm": 0.48664823174476624,
+ "learning_rate": 6.320855650026902e-06,
+ "loss": 0.6957,
+ "step": 2656
+ },
+ {
+ "epoch": 2.657747491481979,
+ "grad_norm": 0.44724440574645996,
+ "learning_rate": 6.284230235630828e-06,
+ "loss": 0.6146,
+ "step": 2657
+ },
+ {
+ "epoch": 2.658747772811103,
+ "grad_norm": 0.5338836908340454,
+ "learning_rate": 6.247707797917257e-06,
+ "loss": 0.6419,
+ "step": 2658
+ },
+ {
+ "epoch": 2.659748054140227,
+ "grad_norm": 0.45886749029159546,
+ "learning_rate": 6.2112883770177545e-06,
+ "loss": 0.5304,
+ "step": 2659
+ },
+ {
+ "epoch": 2.660748335469351,
+ "grad_norm": 0.524269163608551,
+ "learning_rate": 6.174972012950642e-06,
+ "loss": 0.6969,
+ "step": 2660
+ },
+ {
+ "epoch": 2.6617486167984747,
+ "grad_norm": 0.46799468994140625,
+ "learning_rate": 6.138758745621087e-06,
+ "loss": 0.615,
+ "step": 2661
+ },
+ {
+ "epoch": 2.6627488981275986,
+ "grad_norm": 0.49158328771591187,
+ "learning_rate": 6.102648614820861e-06,
+ "loss": 0.5517,
+ "step": 2662
+ },
+ {
+ "epoch": 2.663749179456722,
+ "grad_norm": 0.43124398589134216,
+ "learning_rate": 6.066641660228522e-06,
+ "loss": 0.5201,
+ "step": 2663
+ },
+ {
+ "epoch": 2.664749460785846,
+ "grad_norm": 0.5165560841560364,
+ "learning_rate": 6.030737921409169e-06,
+ "loss": 0.6387,
+ "step": 2664
+ },
+ {
+ "epoch": 2.66574974211497,
+ "grad_norm": 0.5018811225891113,
+ "learning_rate": 5.994937437814518e-06,
+ "loss": 0.6102,
+ "step": 2665
+ },
+ {
+ "epoch": 2.6667500234440937,
+ "grad_norm": 0.5465936660766602,
+ "learning_rate": 5.95924024878286e-06,
+ "loss": 0.7051,
+ "step": 2666
+ },
+ {
+ "epoch": 2.6677503047732176,
+ "grad_norm": 0.4999270737171173,
+ "learning_rate": 5.9236463935389065e-06,
+ "loss": 0.6397,
+ "step": 2667
+ },
+ {
+ "epoch": 2.668750586102341,
+ "grad_norm": 0.46113353967666626,
+ "learning_rate": 5.888155911193893e-06,
+ "loss": 0.5537,
+ "step": 2668
+ },
+ {
+ "epoch": 2.669750867431465,
+ "grad_norm": 0.5522956848144531,
+ "learning_rate": 5.852768840745426e-06,
+ "loss": 0.5461,
+ "step": 2669
+ },
+ {
+ "epoch": 2.670751148760589,
+ "grad_norm": 0.5183952450752258,
+ "learning_rate": 5.817485221077523e-06,
+ "loss": 0.6702,
+ "step": 2670
+ },
+ {
+ "epoch": 2.6717514300897127,
+ "grad_norm": 0.4962097704410553,
+ "learning_rate": 5.78230509096046e-06,
+ "loss": 0.6262,
+ "step": 2671
+ },
+ {
+ "epoch": 2.6727517114188366,
+ "grad_norm": 0.49934902787208557,
+ "learning_rate": 5.747228489050871e-06,
+ "loss": 0.5219,
+ "step": 2672
+ },
+ {
+ "epoch": 2.6737519927479605,
+ "grad_norm": 0.46629029512405396,
+ "learning_rate": 5.71225545389158e-06,
+ "loss": 0.5426,
+ "step": 2673
+ },
+ {
+ "epoch": 2.6747522740770844,
+ "grad_norm": 0.4061794579029083,
+ "learning_rate": 5.677386023911646e-06,
+ "loss": 0.6232,
+ "step": 2674
+ },
+ {
+ "epoch": 2.6757525554062083,
+ "grad_norm": 0.4792473018169403,
+ "learning_rate": 5.6426202374262435e-06,
+ "loss": 0.6289,
+ "step": 2675
+ },
+ {
+ "epoch": 2.6767528367353317,
+ "grad_norm": 0.47763511538505554,
+ "learning_rate": 5.607958132636715e-06,
+ "loss": 0.4832,
+ "step": 2676
+ },
+ {
+ "epoch": 2.6777531180644556,
+ "grad_norm": 0.5005731582641602,
+ "learning_rate": 5.573399747630403e-06,
+ "loss": 0.6503,
+ "step": 2677
+ },
+ {
+ "epoch": 2.6787533993935795,
+ "grad_norm": 0.5461527705192566,
+ "learning_rate": 5.538945120380767e-06,
+ "loss": 0.6782,
+ "step": 2678
+ },
+ {
+ "epoch": 2.6797536807227034,
+ "grad_norm": 0.521238386631012,
+ "learning_rate": 5.504594288747189e-06,
+ "loss": 0.7179,
+ "step": 2679
+ },
+ {
+ "epoch": 2.680753962051827,
+ "grad_norm": 0.5225351452827454,
+ "learning_rate": 5.470347290474987e-06,
+ "loss": 0.5818,
+ "step": 2680
+ },
+ {
+ "epoch": 2.6817542433809507,
+ "grad_norm": 0.41406211256980896,
+ "learning_rate": 5.43620416319548e-06,
+ "loss": 0.4876,
+ "step": 2681
+ },
+ {
+ "epoch": 2.6827545247100746,
+ "grad_norm": 0.5101236701011658,
+ "learning_rate": 5.402164944425758e-06,
+ "loss": 0.6266,
+ "step": 2682
+ },
+ {
+ "epoch": 2.6837548060391985,
+ "grad_norm": 0.5178688168525696,
+ "learning_rate": 5.36822967156878e-06,
+ "loss": 0.7004,
+ "step": 2683
+ },
+ {
+ "epoch": 2.6847550873683224,
+ "grad_norm": 0.5223900079727173,
+ "learning_rate": 5.334398381913252e-06,
+ "loss": 0.6962,
+ "step": 2684
+ },
+ {
+ "epoch": 2.6857553686974462,
+ "grad_norm": 0.5571050047874451,
+ "learning_rate": 5.300671112633671e-06,
+ "loss": 0.6795,
+ "step": 2685
+ },
+ {
+ "epoch": 2.68675565002657,
+ "grad_norm": 0.52665776014328,
+ "learning_rate": 5.267047900790201e-06,
+ "loss": 0.5888,
+ "step": 2686
+ },
+ {
+ "epoch": 2.687755931355694,
+ "grad_norm": 0.5287081003189087,
+ "learning_rate": 5.233528783328634e-06,
+ "loss": 0.7187,
+ "step": 2687
+ },
+ {
+ "epoch": 2.6887562126848175,
+ "grad_norm": 0.45864158868789673,
+ "learning_rate": 5.200113797080463e-06,
+ "loss": 0.6097,
+ "step": 2688
+ },
+ {
+ "epoch": 2.6897564940139413,
+ "grad_norm": 0.5025407075881958,
+ "learning_rate": 5.166802978762697e-06,
+ "loss": 0.6521,
+ "step": 2689
+ },
+ {
+ "epoch": 2.6907567753430652,
+ "grad_norm": 0.4856833517551422,
+ "learning_rate": 5.1335963649779e-06,
+ "loss": 0.675,
+ "step": 2690
+ },
+ {
+ "epoch": 2.691757056672189,
+ "grad_norm": 0.5605478882789612,
+ "learning_rate": 5.100493992214128e-06,
+ "loss": 0.5815,
+ "step": 2691
+ },
+ {
+ "epoch": 2.692757338001313,
+ "grad_norm": 0.44705259799957275,
+ "learning_rate": 5.067495896844931e-06,
+ "loss": 0.5879,
+ "step": 2692
+ },
+ {
+ "epoch": 2.6937576193304364,
+ "grad_norm": 0.49051278829574585,
+ "learning_rate": 5.034602115129206e-06,
+ "loss": 0.4866,
+ "step": 2693
+ },
+ {
+ "epoch": 2.6947579006595603,
+ "grad_norm": 0.5016437768936157,
+ "learning_rate": 5.001812683211305e-06,
+ "loss": 0.5978,
+ "step": 2694
+ },
+ {
+ "epoch": 2.695758181988684,
+ "grad_norm": 0.47565481066703796,
+ "learning_rate": 4.969127637120863e-06,
+ "loss": 0.5891,
+ "step": 2695
+ },
+ {
+ "epoch": 2.696758463317808,
+ "grad_norm": 0.5603350400924683,
+ "learning_rate": 4.9365470127728404e-06,
+ "loss": 0.6422,
+ "step": 2696
+ },
+ {
+ "epoch": 2.697758744646932,
+ "grad_norm": 0.4406677782535553,
+ "learning_rate": 4.904070845967468e-06,
+ "loss": 0.5968,
+ "step": 2697
+ },
+ {
+ "epoch": 2.698759025976056,
+ "grad_norm": 0.5400702357292175,
+ "learning_rate": 4.871699172390154e-06,
+ "loss": 0.6665,
+ "step": 2698
+ },
+ {
+ "epoch": 2.6997593073051798,
+ "grad_norm": 0.4825376868247986,
+ "learning_rate": 4.839432027611535e-06,
+ "loss": 0.601,
+ "step": 2699
+ },
+ {
+ "epoch": 2.7007595886343037,
+ "grad_norm": 0.514046311378479,
+ "learning_rate": 4.807269447087348e-06,
+ "loss": 0.6344,
+ "step": 2700
+ },
+ {
+ "epoch": 2.701759869963427,
+ "grad_norm": 0.549150824546814,
+ "learning_rate": 4.775211466158469e-06,
+ "loss": 0.7308,
+ "step": 2701
+ },
+ {
+ "epoch": 2.702760151292551,
+ "grad_norm": 0.4731218218803406,
+ "learning_rate": 4.743258120050809e-06,
+ "loss": 0.557,
+ "step": 2702
+ },
+ {
+ "epoch": 2.703760432621675,
+ "grad_norm": 0.4153502881526947,
+ "learning_rate": 4.7114094438753255e-06,
+ "loss": 0.5125,
+ "step": 2703
+ },
+ {
+ "epoch": 2.7047607139507988,
+ "grad_norm": 0.4964785575866699,
+ "learning_rate": 4.679665472627947e-06,
+ "loss": 0.4441,
+ "step": 2704
+ },
+ {
+ "epoch": 2.7057609952799226,
+ "grad_norm": 0.4732365608215332,
+ "learning_rate": 4.648026241189563e-06,
+ "loss": 0.5892,
+ "step": 2705
+ },
+ {
+ "epoch": 2.706761276609046,
+ "grad_norm": 0.4849886894226074,
+ "learning_rate": 4.616491784325972e-06,
+ "loss": 0.6223,
+ "step": 2706
+ },
+ {
+ "epoch": 2.70776155793817,
+ "grad_norm": 0.5276421904563904,
+ "learning_rate": 4.585062136687812e-06,
+ "loss": 0.6232,
+ "step": 2707
+ },
+ {
+ "epoch": 2.708761839267294,
+ "grad_norm": 0.5398087501525879,
+ "learning_rate": 4.553737332810614e-06,
+ "loss": 0.7134,
+ "step": 2708
+ },
+ {
+ "epoch": 2.7097621205964177,
+ "grad_norm": 0.6187136173248291,
+ "learning_rate": 4.5225174071146455e-06,
+ "loss": 0.7167,
+ "step": 2709
+ },
+ {
+ "epoch": 2.7107624019255416,
+ "grad_norm": 0.5205793380737305,
+ "learning_rate": 4.4914023939049755e-06,
+ "loss": 0.6382,
+ "step": 2710
+ },
+ {
+ "epoch": 2.7117626832546655,
+ "grad_norm": 0.4959447383880615,
+ "learning_rate": 4.460392327371377e-06,
+ "loss": 0.6533,
+ "step": 2711
+ },
+ {
+ "epoch": 2.7127629645837894,
+ "grad_norm": 0.5069653987884521,
+ "learning_rate": 4.429487241588304e-06,
+ "loss": 0.6428,
+ "step": 2712
+ },
+ {
+ "epoch": 2.713763245912913,
+ "grad_norm": 0.5361613035202026,
+ "learning_rate": 4.398687170514859e-06,
+ "loss": 0.6852,
+ "step": 2713
+ },
+ {
+ "epoch": 2.7147635272420367,
+ "grad_norm": 0.5680058598518372,
+ "learning_rate": 4.367992147994738e-06,
+ "loss": 0.6239,
+ "step": 2714
+ },
+ {
+ "epoch": 2.7157638085711606,
+ "grad_norm": 0.531086802482605,
+ "learning_rate": 4.337402207756236e-06,
+ "loss": 0.7462,
+ "step": 2715
+ },
+ {
+ "epoch": 2.7167640899002845,
+ "grad_norm": 0.4760245680809021,
+ "learning_rate": 4.306917383412134e-06,
+ "loss": 0.5898,
+ "step": 2716
+ },
+ {
+ "epoch": 2.7177643712294084,
+ "grad_norm": 0.5797866582870483,
+ "learning_rate": 4.276537708459783e-06,
+ "loss": 0.6475,
+ "step": 2717
+ },
+ {
+ "epoch": 2.718764652558532,
+ "grad_norm": 0.47268563508987427,
+ "learning_rate": 4.24626321628091e-06,
+ "loss": 0.5935,
+ "step": 2718
+ },
+ {
+ "epoch": 2.7197649338876557,
+ "grad_norm": 0.40147414803504944,
+ "learning_rate": 4.2160939401417524e-06,
+ "loss": 0.5189,
+ "step": 2719
+ },
+ {
+ "epoch": 2.7207652152167796,
+ "grad_norm": 0.5176621675491333,
+ "learning_rate": 4.186029913192846e-06,
+ "loss": 0.7196,
+ "step": 2720
+ },
+ {
+ "epoch": 2.7217654965459035,
+ "grad_norm": 0.493111789226532,
+ "learning_rate": 4.156071168469145e-06,
+ "loss": 0.6061,
+ "step": 2721
+ },
+ {
+ "epoch": 2.7227657778750274,
+ "grad_norm": 0.4477739930152893,
+ "learning_rate": 4.12621773888987e-06,
+ "loss": 0.5587,
+ "step": 2722
+ },
+ {
+ "epoch": 2.7237660592041513,
+ "grad_norm": 0.5118638873100281,
+ "learning_rate": 4.096469657258573e-06,
+ "loss": 0.6849,
+ "step": 2723
+ },
+ {
+ "epoch": 2.724766340533275,
+ "grad_norm": 0.4399039149284363,
+ "learning_rate": 4.066826956262981e-06,
+ "loss": 0.5657,
+ "step": 2724
+ },
+ {
+ "epoch": 2.725766621862399,
+ "grad_norm": 0.46115806698799133,
+ "learning_rate": 4.037289668475086e-06,
+ "loss": 0.6126,
+ "step": 2725
+ },
+ {
+ "epoch": 2.7267669031915225,
+ "grad_norm": 0.4800427556037903,
+ "learning_rate": 4.007857826351024e-06,
+ "loss": 0.7171,
+ "step": 2726
+ },
+ {
+ "epoch": 2.7277671845206464,
+ "grad_norm": 0.5179166793823242,
+ "learning_rate": 3.9785314622310495e-06,
+ "loss": 0.6882,
+ "step": 2727
+ },
+ {
+ "epoch": 2.7287674658497703,
+ "grad_norm": 0.47279757261276245,
+ "learning_rate": 3.949310608339552e-06,
+ "loss": 0.5468,
+ "step": 2728
+ },
+ {
+ "epoch": 2.729767747178894,
+ "grad_norm": 0.49115264415740967,
+ "learning_rate": 3.9201952967849565e-06,
+ "loss": 0.604,
+ "step": 2729
+ },
+ {
+ "epoch": 2.730768028508018,
+ "grad_norm": 0.5141243934631348,
+ "learning_rate": 3.8911855595597295e-06,
+ "loss": 0.5632,
+ "step": 2730
+ },
+ {
+ "epoch": 2.7317683098371415,
+ "grad_norm": 0.4766594469547272,
+ "learning_rate": 3.862281428540315e-06,
+ "loss": 0.5918,
+ "step": 2731
+ },
+ {
+ "epoch": 2.7327685911662654,
+ "grad_norm": 0.44311001896858215,
+ "learning_rate": 3.8334829354871315e-06,
+ "loss": 0.5901,
+ "step": 2732
+ },
+ {
+ "epoch": 2.7337688724953892,
+ "grad_norm": 0.46211880445480347,
+ "learning_rate": 3.8047901120445316e-06,
+ "loss": 0.4982,
+ "step": 2733
+ },
+ {
+ "epoch": 2.734769153824513,
+ "grad_norm": 0.5215771198272705,
+ "learning_rate": 3.776202989740707e-06,
+ "loss": 0.613,
+ "step": 2734
+ },
+ {
+ "epoch": 2.735769435153637,
+ "grad_norm": 0.509265661239624,
+ "learning_rate": 3.7477215999877635e-06,
+ "loss": 0.6917,
+ "step": 2735
+ },
+ {
+ "epoch": 2.736769716482761,
+ "grad_norm": 0.5061389803886414,
+ "learning_rate": 3.7193459740815674e-06,
+ "loss": 0.602,
+ "step": 2736
+ },
+ {
+ "epoch": 2.737769997811885,
+ "grad_norm": 0.5833300948143005,
+ "learning_rate": 3.6910761432018328e-06,
+ "loss": 0.698,
+ "step": 2737
+ },
+ {
+ "epoch": 2.7387702791410087,
+ "grad_norm": 0.4603955149650574,
+ "learning_rate": 3.662912138411967e-06,
+ "loss": 0.5256,
+ "step": 2738
+ },
+ {
+ "epoch": 2.739770560470132,
+ "grad_norm": 0.45078399777412415,
+ "learning_rate": 3.634853990659126e-06,
+ "loss": 0.5505,
+ "step": 2739
+ },
+ {
+ "epoch": 2.740770841799256,
+ "grad_norm": 0.4314641058444977,
+ "learning_rate": 3.606901730774159e-06,
+ "loss": 0.5805,
+ "step": 2740
+ },
+ {
+ "epoch": 2.74177112312838,
+ "grad_norm": 0.462260365486145,
+ "learning_rate": 3.5790553894715085e-06,
+ "loss": 0.5582,
+ "step": 2741
+ },
+ {
+ "epoch": 2.742771404457504,
+ "grad_norm": 0.444097638130188,
+ "learning_rate": 3.5513149973492976e-06,
+ "loss": 0.5816,
+ "step": 2742
+ },
+ {
+ "epoch": 2.7437716857866272,
+ "grad_norm": 0.47750967741012573,
+ "learning_rate": 3.5236805848891886e-06,
+ "loss": 0.6297,
+ "step": 2743
+ },
+ {
+ "epoch": 2.744771967115751,
+ "grad_norm": 0.4356045126914978,
+ "learning_rate": 3.4961521824564026e-06,
+ "loss": 0.5083,
+ "step": 2744
+ },
+ {
+ "epoch": 2.745772248444875,
+ "grad_norm": 0.44277358055114746,
+ "learning_rate": 3.4687298202996655e-06,
+ "loss": 0.5229,
+ "step": 2745
+ },
+ {
+ "epoch": 2.746772529773999,
+ "grad_norm": 0.42699354887008667,
+ "learning_rate": 3.4414135285512183e-06,
+ "loss": 0.5047,
+ "step": 2746
+ },
+ {
+ "epoch": 2.7477728111031228,
+ "grad_norm": 0.43430620431900024,
+ "learning_rate": 3.4142033372266957e-06,
+ "loss": 0.5838,
+ "step": 2747
+ },
+ {
+ "epoch": 2.7487730924322467,
+ "grad_norm": 0.5193162560462952,
+ "learning_rate": 3.3870992762252143e-06,
+ "loss": 0.6018,
+ "step": 2748
+ },
+ {
+ "epoch": 2.7497733737613705,
+ "grad_norm": 0.520524799823761,
+ "learning_rate": 3.3601013753291945e-06,
+ "loss": 0.4765,
+ "step": 2749
+ },
+ {
+ "epoch": 2.7507736550904944,
+ "grad_norm": 0.49503692984580994,
+ "learning_rate": 3.333209664204473e-06,
+ "loss": 0.6935,
+ "step": 2750
+ },
+ {
+ "epoch": 2.751773936419618,
+ "grad_norm": 0.44826647639274597,
+ "learning_rate": 3.3064241724001797e-06,
+ "loss": 0.4779,
+ "step": 2751
+ },
+ {
+ "epoch": 2.7527742177487418,
+ "grad_norm": 0.4832427203655243,
+ "learning_rate": 3.2797449293487048e-06,
+ "loss": 0.5629,
+ "step": 2752
+ },
+ {
+ "epoch": 2.7537744990778656,
+ "grad_norm": 0.491946280002594,
+ "learning_rate": 3.253171964365731e-06,
+ "loss": 0.6086,
+ "step": 2753
+ },
+ {
+ "epoch": 2.7547747804069895,
+ "grad_norm": 0.5541719794273376,
+ "learning_rate": 3.226705306650113e-06,
+ "loss": 0.6197,
+ "step": 2754
+ },
+ {
+ "epoch": 2.7557750617361134,
+ "grad_norm": 0.44589972496032715,
+ "learning_rate": 3.200344985283965e-06,
+ "loss": 0.4589,
+ "step": 2755
+ },
+ {
+ "epoch": 2.756775343065237,
+ "grad_norm": 0.4537385404109955,
+ "learning_rate": 3.174091029232473e-06,
+ "loss": 0.5947,
+ "step": 2756
+ },
+ {
+ "epoch": 2.7577756243943607,
+ "grad_norm": 0.44235026836395264,
+ "learning_rate": 3.1479434673440167e-06,
+ "loss": 0.5439,
+ "step": 2757
+ },
+ {
+ "epoch": 2.7587759057234846,
+ "grad_norm": 0.4530452787876129,
+ "learning_rate": 3.1219023283500238e-06,
+ "loss": 0.5916,
+ "step": 2758
+ },
+ {
+ "epoch": 2.7597761870526085,
+ "grad_norm": 0.48509615659713745,
+ "learning_rate": 3.095967640864983e-06,
+ "loss": 0.5148,
+ "step": 2759
+ },
+ {
+ "epoch": 2.7607764683817324,
+ "grad_norm": 0.4340590536594391,
+ "learning_rate": 3.070139433386454e-06,
+ "loss": 0.557,
+ "step": 2760
+ },
+ {
+ "epoch": 2.7617767497108563,
+ "grad_norm": 0.5271961688995361,
+ "learning_rate": 3.0444177342949464e-06,
+ "loss": 0.5723,
+ "step": 2761
+ },
+ {
+ "epoch": 2.76277703103998,
+ "grad_norm": 0.45134633779525757,
+ "learning_rate": 3.0188025718539624e-06,
+ "loss": 0.6171,
+ "step": 2762
+ },
+ {
+ "epoch": 2.763777312369104,
+ "grad_norm": 0.5668414831161499,
+ "learning_rate": 2.9932939742099208e-06,
+ "loss": 0.6798,
+ "step": 2763
+ },
+ {
+ "epoch": 2.7647775936982275,
+ "grad_norm": 0.5560891628265381,
+ "learning_rate": 2.9678919693921894e-06,
+ "loss": 0.5539,
+ "step": 2764
+ },
+ {
+ "epoch": 2.7657778750273514,
+ "grad_norm": 0.45360177755355835,
+ "learning_rate": 2.942596585312929e-06,
+ "loss": 0.5844,
+ "step": 2765
+ },
+ {
+ "epoch": 2.7667781563564753,
+ "grad_norm": 0.5192467570304871,
+ "learning_rate": 2.9174078497672397e-06,
+ "loss": 0.6873,
+ "step": 2766
+ },
+ {
+ "epoch": 2.767778437685599,
+ "grad_norm": 0.47330018877983093,
+ "learning_rate": 2.892325790432948e-06,
+ "loss": 0.5392,
+ "step": 2767
+ },
+ {
+ "epoch": 2.768778719014723,
+ "grad_norm": 0.5141651630401611,
+ "learning_rate": 2.8673504348707412e-06,
+ "loss": 0.6731,
+ "step": 2768
+ },
+ {
+ "epoch": 2.7697790003438465,
+ "grad_norm": 0.5411480665206909,
+ "learning_rate": 2.842481810523978e-06,
+ "loss": 0.741,
+ "step": 2769
+ },
+ {
+ "epoch": 2.7707792816729704,
+ "grad_norm": 0.5882455706596375,
+ "learning_rate": 2.8177199447187994e-06,
+ "loss": 0.632,
+ "step": 2770
+ },
+ {
+ "epoch": 2.7717795630020943,
+ "grad_norm": 0.5335837602615356,
+ "learning_rate": 2.7930648646640188e-06,
+ "loss": 0.7015,
+ "step": 2771
+ },
+ {
+ "epoch": 2.772779844331218,
+ "grad_norm": 0.5579439997673035,
+ "learning_rate": 2.7685165974510986e-06,
+ "loss": 0.6333,
+ "step": 2772
+ },
+ {
+ "epoch": 2.773780125660342,
+ "grad_norm": 0.4753457307815552,
+ "learning_rate": 2.744075170054161e-06,
+ "loss": 0.4558,
+ "step": 2773
+ },
+ {
+ "epoch": 2.774780406989466,
+ "grad_norm": 0.4914829730987549,
+ "learning_rate": 2.7197406093299018e-06,
+ "loss": 0.5485,
+ "step": 2774
+ },
+ {
+ "epoch": 2.77578068831859,
+ "grad_norm": 0.4584093689918518,
+ "learning_rate": 2.6955129420176196e-06,
+ "loss": 0.5949,
+ "step": 2775
+ },
+ {
+ "epoch": 2.7767809696477133,
+ "grad_norm": 0.45382729172706604,
+ "learning_rate": 2.6713921947391085e-06,
+ "loss": 0.6013,
+ "step": 2776
+ },
+ {
+ "epoch": 2.777781250976837,
+ "grad_norm": 0.5035733580589294,
+ "learning_rate": 2.647378393998745e-06,
+ "loss": 0.6465,
+ "step": 2777
+ },
+ {
+ "epoch": 2.778781532305961,
+ "grad_norm": 0.41726845502853394,
+ "learning_rate": 2.623471566183322e-06,
+ "loss": 0.4836,
+ "step": 2778
+ },
+ {
+ "epoch": 2.779781813635085,
+ "grad_norm": 0.4969714283943176,
+ "learning_rate": 2.599671737562137e-06,
+ "loss": 0.6482,
+ "step": 2779
+ },
+ {
+ "epoch": 2.780782094964209,
+ "grad_norm": 0.518803596496582,
+ "learning_rate": 2.5759789342868935e-06,
+ "loss": 0.6234,
+ "step": 2780
+ },
+ {
+ "epoch": 2.7817823762933322,
+ "grad_norm": 0.4382246434688568,
+ "learning_rate": 2.552393182391677e-06,
+ "loss": 0.5992,
+ "step": 2781
+ },
+ {
+ "epoch": 2.782782657622456,
+ "grad_norm": 0.4642046391963959,
+ "learning_rate": 2.528914507793001e-06,
+ "loss": 0.5579,
+ "step": 2782
+ },
+ {
+ "epoch": 2.78378293895158,
+ "grad_norm": 0.4862242341041565,
+ "learning_rate": 2.505542936289651e-06,
+ "loss": 0.5966,
+ "step": 2783
+ },
+ {
+ "epoch": 2.784783220280704,
+ "grad_norm": 0.511661171913147,
+ "learning_rate": 2.482278493562784e-06,
+ "loss": 0.5925,
+ "step": 2784
+ },
+ {
+ "epoch": 2.785783501609828,
+ "grad_norm": 0.5070545077323914,
+ "learning_rate": 2.4591212051757962e-06,
+ "loss": 0.5739,
+ "step": 2785
+ },
+ {
+ "epoch": 2.7867837829389517,
+ "grad_norm": 0.5460003614425659,
+ "learning_rate": 2.436071096574366e-06,
+ "loss": 0.6896,
+ "step": 2786
+ },
+ {
+ "epoch": 2.7877840642680756,
+ "grad_norm": 0.5260170102119446,
+ "learning_rate": 2.4131281930864002e-06,
+ "loss": 0.5991,
+ "step": 2787
+ },
+ {
+ "epoch": 2.7887843455971995,
+ "grad_norm": 0.616073727607727,
+ "learning_rate": 2.390292519921977e-06,
+ "loss": 0.7948,
+ "step": 2788
+ },
+ {
+ "epoch": 2.789784626926323,
+ "grad_norm": 0.4741346538066864,
+ "learning_rate": 2.367564102173403e-06,
+ "loss": 0.5259,
+ "step": 2789
+ },
+ {
+ "epoch": 2.790784908255447,
+ "grad_norm": 0.475841224193573,
+ "learning_rate": 2.3449429648150665e-06,
+ "loss": 0.495,
+ "step": 2790
+ },
+ {
+ "epoch": 2.7917851895845707,
+ "grad_norm": 0.8698926568031311,
+ "learning_rate": 2.3224291327035407e-06,
+ "loss": 0.7022,
+ "step": 2791
+ },
+ {
+ "epoch": 2.7927854709136946,
+ "grad_norm": 0.47991234064102173,
+ "learning_rate": 2.3000226305774255e-06,
+ "loss": 0.6974,
+ "step": 2792
+ },
+ {
+ "epoch": 2.7937857522428184,
+ "grad_norm": 0.45636966824531555,
+ "learning_rate": 2.277723483057448e-06,
+ "loss": 0.5445,
+ "step": 2793
+ },
+ {
+ "epoch": 2.794786033571942,
+ "grad_norm": 0.541229784488678,
+ "learning_rate": 2.2555317146462975e-06,
+ "loss": 0.6116,
+ "step": 2794
+ },
+ {
+ "epoch": 2.7957863149010658,
+ "grad_norm": 1.3022198677062988,
+ "learning_rate": 2.2334473497287454e-06,
+ "loss": 0.7207,
+ "step": 2795
+ },
+ {
+ "epoch": 2.7967865962301897,
+ "grad_norm": 0.5061864256858826,
+ "learning_rate": 2.2114704125714924e-06,
+ "loss": 0.4593,
+ "step": 2796
+ },
+ {
+ "epoch": 2.7977868775593135,
+ "grad_norm": 0.44555899500846863,
+ "learning_rate": 2.1896009273232433e-06,
+ "loss": 0.6366,
+ "step": 2797
+ },
+ {
+ "epoch": 2.7987871588884374,
+ "grad_norm": 0.6055293679237366,
+ "learning_rate": 2.1678389180145865e-06,
+ "loss": 0.6926,
+ "step": 2798
+ },
+ {
+ "epoch": 2.7997874402175613,
+ "grad_norm": 0.4621210992336273,
+ "learning_rate": 2.1461844085580385e-06,
+ "loss": 0.4991,
+ "step": 2799
+ },
+ {
+ "epoch": 2.800787721546685,
+ "grad_norm": 0.5449041128158569,
+ "learning_rate": 2.124637422747999e-06,
+ "loss": 0.6282,
+ "step": 2800
+ },
+ {
+ "epoch": 2.801788002875809,
+ "grad_norm": 0.5077667236328125,
+ "learning_rate": 2.1031979842606853e-06,
+ "loss": 0.5311,
+ "step": 2801
+ },
+ {
+ "epoch": 2.8027882842049325,
+ "grad_norm": 0.46800270676612854,
+ "learning_rate": 2.0818661166542074e-06,
+ "loss": 0.5343,
+ "step": 2802
+ },
+ {
+ "epoch": 2.8037885655340564,
+ "grad_norm": 0.4870428442955017,
+ "learning_rate": 2.0606418433683828e-06,
+ "loss": 0.5614,
+ "step": 2803
+ },
+ {
+ "epoch": 2.8047888468631803,
+ "grad_norm": 0.47305482625961304,
+ "learning_rate": 2.0395251877248778e-06,
+ "loss": 0.5367,
+ "step": 2804
+ },
+ {
+ "epoch": 2.805789128192304,
+ "grad_norm": 0.4642845094203949,
+ "learning_rate": 2.0185161729270653e-06,
+ "loss": 0.6471,
+ "step": 2805
+ },
+ {
+ "epoch": 2.8067894095214276,
+ "grad_norm": 0.6057828664779663,
+ "learning_rate": 1.9976148220600457e-06,
+ "loss": 0.6366,
+ "step": 2806
+ },
+ {
+ "epoch": 2.8077896908505515,
+ "grad_norm": 0.45656031370162964,
+ "learning_rate": 1.976821158090647e-06,
+ "loss": 0.5977,
+ "step": 2807
+ },
+ {
+ "epoch": 2.8087899721796754,
+ "grad_norm": 0.5119034051895142,
+ "learning_rate": 1.9561352038673263e-06,
+ "loss": 0.5596,
+ "step": 2808
+ },
+ {
+ "epoch": 2.8097902535087993,
+ "grad_norm": 0.5202760696411133,
+ "learning_rate": 1.9355569821202234e-06,
+ "loss": 0.5544,
+ "step": 2809
+ },
+ {
+ "epoch": 2.810790534837923,
+ "grad_norm": 0.5742132067680359,
+ "learning_rate": 1.915086515461062e-06,
+ "loss": 0.6186,
+ "step": 2810
+ },
+ {
+ "epoch": 2.811790816167047,
+ "grad_norm": 0.4370170831680298,
+ "learning_rate": 1.8947238263832046e-06,
+ "loss": 0.5024,
+ "step": 2811
+ },
+ {
+ "epoch": 2.812791097496171,
+ "grad_norm": 0.505889356136322,
+ "learning_rate": 1.874468937261531e-06,
+ "loss": 0.7124,
+ "step": 2812
+ },
+ {
+ "epoch": 2.813791378825295,
+ "grad_norm": 0.4973495900630951,
+ "learning_rate": 1.8543218703525378e-06,
+ "loss": 0.5697,
+ "step": 2813
+ },
+ {
+ "epoch": 2.8147916601544183,
+ "grad_norm": 0.5079476237297058,
+ "learning_rate": 1.8342826477941944e-06,
+ "loss": 0.7185,
+ "step": 2814
+ },
+ {
+ "epoch": 2.815791941483542,
+ "grad_norm": 0.5448799729347229,
+ "learning_rate": 1.8143512916059646e-06,
+ "loss": 0.5735,
+ "step": 2815
+ },
+ {
+ "epoch": 2.816792222812666,
+ "grad_norm": 0.5600875020027161,
+ "learning_rate": 1.7945278236888408e-06,
+ "loss": 0.6879,
+ "step": 2816
+ },
+ {
+ "epoch": 2.81779250414179,
+ "grad_norm": 0.5398167371749878,
+ "learning_rate": 1.7748122658251876e-06,
+ "loss": 0.71,
+ "step": 2817
+ },
+ {
+ "epoch": 2.818792785470914,
+ "grad_norm": 0.42669185996055603,
+ "learning_rate": 1.7552046396788757e-06,
+ "loss": 0.5373,
+ "step": 2818
+ },
+ {
+ "epoch": 2.8197930668000373,
+ "grad_norm": 0.5382595062255859,
+ "learning_rate": 1.7357049667951043e-06,
+ "loss": 0.5894,
+ "step": 2819
+ },
+ {
+ "epoch": 2.820793348129161,
+ "grad_norm": 0.48555222153663635,
+ "learning_rate": 1.7163132686005223e-06,
+ "loss": 0.6377,
+ "step": 2820
+ },
+ {
+ "epoch": 2.821793629458285,
+ "grad_norm": 0.5174646973609924,
+ "learning_rate": 1.6970295664030745e-06,
+ "loss": 0.5741,
+ "step": 2821
+ },
+ {
+ "epoch": 2.822793910787409,
+ "grad_norm": 0.4947803318500519,
+ "learning_rate": 1.6778538813920775e-06,
+ "loss": 0.6765,
+ "step": 2822
+ },
+ {
+ "epoch": 2.823794192116533,
+ "grad_norm": 0.46039316058158875,
+ "learning_rate": 1.6587862346381321e-06,
+ "loss": 0.5825,
+ "step": 2823
+ },
+ {
+ "epoch": 2.8247944734456567,
+ "grad_norm": 0.491996169090271,
+ "learning_rate": 1.6398266470931344e-06,
+ "loss": 0.6033,
+ "step": 2824
+ },
+ {
+ "epoch": 2.8257947547747806,
+ "grad_norm": 0.5310854315757751,
+ "learning_rate": 1.6209751395902417e-06,
+ "loss": 0.8231,
+ "step": 2825
+ },
+ {
+ "epoch": 2.8267950361039045,
+ "grad_norm": 0.43436646461486816,
+ "learning_rate": 1.6022317328438506e-06,
+ "loss": 0.6196,
+ "step": 2826
+ },
+ {
+ "epoch": 2.827795317433028,
+ "grad_norm": 0.47770586609840393,
+ "learning_rate": 1.5835964474495868e-06,
+ "loss": 0.7245,
+ "step": 2827
+ },
+ {
+ "epoch": 2.828795598762152,
+ "grad_norm": 0.4826139807701111,
+ "learning_rate": 1.5650693038842367e-06,
+ "loss": 0.5927,
+ "step": 2828
+ },
+ {
+ "epoch": 2.8297958800912757,
+ "grad_norm": 0.5051170587539673,
+ "learning_rate": 1.5466503225058048e-06,
+ "loss": 0.6526,
+ "step": 2829
+ },
+ {
+ "epoch": 2.8307961614203996,
+ "grad_norm": 0.47110068798065186,
+ "learning_rate": 1.5283395235534015e-06,
+ "loss": 0.6556,
+ "step": 2830
+ },
+ {
+ "epoch": 2.8317964427495235,
+ "grad_norm": 0.48018333315849304,
+ "learning_rate": 1.5101369271472988e-06,
+ "loss": 0.7027,
+ "step": 2831
+ },
+ {
+ "epoch": 2.832796724078647,
+ "grad_norm": 0.5039500594139099,
+ "learning_rate": 1.4920425532888526e-06,
+ "loss": 0.5812,
+ "step": 2832
+ },
+ {
+ "epoch": 2.833797005407771,
+ "grad_norm": 0.5217962265014648,
+ "learning_rate": 1.4740564218605035e-06,
+ "loss": 0.6007,
+ "step": 2833
+ },
+ {
+ "epoch": 2.8347972867368947,
+ "grad_norm": 0.4518835246562958,
+ "learning_rate": 1.4561785526257533e-06,
+ "loss": 0.6296,
+ "step": 2834
+ },
+ {
+ "epoch": 2.8357975680660186,
+ "grad_norm": 0.44320374727249146,
+ "learning_rate": 1.4384089652291543e-06,
+ "loss": 0.5989,
+ "step": 2835
+ },
+ {
+ "epoch": 2.8367978493951425,
+ "grad_norm": 0.44888973236083984,
+ "learning_rate": 1.420747679196277e-06,
+ "loss": 0.5615,
+ "step": 2836
+ },
+ {
+ "epoch": 2.8377981307242663,
+ "grad_norm": 0.49741706252098083,
+ "learning_rate": 1.4031947139336643e-06,
+ "loss": 0.4757,
+ "step": 2837
+ },
+ {
+ "epoch": 2.8387984120533902,
+ "grad_norm": 0.4207344651222229,
+ "learning_rate": 1.3857500887288544e-06,
+ "loss": 0.4949,
+ "step": 2838
+ },
+ {
+ "epoch": 2.8397986933825137,
+ "grad_norm": 0.4773598611354828,
+ "learning_rate": 1.3684138227503474e-06,
+ "loss": 0.6138,
+ "step": 2839
+ },
+ {
+ "epoch": 2.8407989747116376,
+ "grad_norm": 0.4747704267501831,
+ "learning_rate": 1.3511859350475497e-06,
+ "loss": 0.5415,
+ "step": 2840
+ },
+ {
+ "epoch": 2.8417992560407614,
+ "grad_norm": 0.49928930401802063,
+ "learning_rate": 1.3340664445507966e-06,
+ "loss": 0.649,
+ "step": 2841
+ },
+ {
+ "epoch": 2.8427995373698853,
+ "grad_norm": 0.43006622791290283,
+ "learning_rate": 1.3170553700713294e-06,
+ "loss": 0.4549,
+ "step": 2842
+ },
+ {
+ "epoch": 2.843799818699009,
+ "grad_norm": 0.6910367012023926,
+ "learning_rate": 1.3001527303012184e-06,
+ "loss": 0.6361,
+ "step": 2843
+ },
+ {
+ "epoch": 2.8448001000281327,
+ "grad_norm": 0.4559337794780731,
+ "learning_rate": 1.2833585438134287e-06,
+ "loss": 0.482,
+ "step": 2844
+ },
+ {
+ "epoch": 2.8458003813572565,
+ "grad_norm": 0.44005000591278076,
+ "learning_rate": 1.2666728290617213e-06,
+ "loss": 0.582,
+ "step": 2845
+ },
+ {
+ "epoch": 2.8468006626863804,
+ "grad_norm": 0.5919110774993896,
+ "learning_rate": 1.2500956043806744e-06,
+ "loss": 0.6233,
+ "step": 2846
+ },
+ {
+ "epoch": 2.8478009440155043,
+ "grad_norm": 0.5048378705978394,
+ "learning_rate": 1.2336268879856727e-06,
+ "loss": 0.6467,
+ "step": 2847
+ },
+ {
+ "epoch": 2.848801225344628,
+ "grad_norm": 0.5845475792884827,
+ "learning_rate": 1.217266697972852e-06,
+ "loss": 0.7489,
+ "step": 2848
+ },
+ {
+ "epoch": 2.849801506673752,
+ "grad_norm": 0.4406288266181946,
+ "learning_rate": 1.201015052319099e-06,
+ "loss": 0.5419,
+ "step": 2849
+ },
+ {
+ "epoch": 2.850801788002876,
+ "grad_norm": 0.3929692208766937,
+ "learning_rate": 1.1848719688820398e-06,
+ "loss": 0.4808,
+ "step": 2850
+ },
+ {
+ "epoch": 2.851802069332,
+ "grad_norm": 0.5650544166564941,
+ "learning_rate": 1.168837465400008e-06,
+ "loss": 0.6947,
+ "step": 2851
+ },
+ {
+ "epoch": 2.8528023506611233,
+ "grad_norm": 0.45540928840637207,
+ "learning_rate": 1.1529115594920092e-06,
+ "loss": 0.5777,
+ "step": 2852
+ },
+ {
+ "epoch": 2.853802631990247,
+ "grad_norm": 0.4697714149951935,
+ "learning_rate": 1.1370942686577347e-06,
+ "loss": 0.5871,
+ "step": 2853
+ },
+ {
+ "epoch": 2.854802913319371,
+ "grad_norm": 0.48228269815444946,
+ "learning_rate": 1.1213856102775366e-06,
+ "loss": 0.5365,
+ "step": 2854
+ },
+ {
+ "epoch": 2.855803194648495,
+ "grad_norm": 0.5245192646980286,
+ "learning_rate": 1.1057856016123858e-06,
+ "loss": 0.6173,
+ "step": 2855
+ },
+ {
+ "epoch": 2.856803475977619,
+ "grad_norm": 0.49397265911102295,
+ "learning_rate": 1.09029425980387e-06,
+ "loss": 0.6989,
+ "step": 2856
+ },
+ {
+ "epoch": 2.8578037573067423,
+ "grad_norm": 0.4669159948825836,
+ "learning_rate": 1.0749116018741623e-06,
+ "loss": 0.5667,
+ "step": 2857
+ },
+ {
+ "epoch": 2.858804038635866,
+ "grad_norm": 0.539012610912323,
+ "learning_rate": 1.0596376447260414e-06,
+ "loss": 0.5201,
+ "step": 2858
+ },
+ {
+ "epoch": 2.85980431996499,
+ "grad_norm": 0.5197359323501587,
+ "learning_rate": 1.0444724051428155e-06,
+ "loss": 0.6549,
+ "step": 2859
+ },
+ {
+ "epoch": 2.860804601294114,
+ "grad_norm": 0.48977217078208923,
+ "learning_rate": 1.029415899788322e-06,
+ "loss": 0.6008,
+ "step": 2860
+ },
+ {
+ "epoch": 2.861804882623238,
+ "grad_norm": 0.5029745101928711,
+ "learning_rate": 1.0144681452069703e-06,
+ "loss": 0.6153,
+ "step": 2861
+ },
+ {
+ "epoch": 2.8628051639523617,
+ "grad_norm": 0.438455730676651,
+ "learning_rate": 9.996291578236228e-07,
+ "loss": 0.6527,
+ "step": 2862
+ },
+ {
+ "epoch": 2.8638054452814856,
+ "grad_norm": 0.4809611439704895,
+ "learning_rate": 9.84898953943636e-07,
+ "loss": 0.6353,
+ "step": 2863
+ },
+ {
+ "epoch": 2.8648057266106095,
+ "grad_norm": 0.5335886478424072,
+ "learning_rate": 9.702775497528516e-07,
+ "loss": 0.6332,
+ "step": 2864
+ },
+ {
+ "epoch": 2.865806007939733,
+ "grad_norm": 0.43682292103767395,
+ "learning_rate": 9.5576496131754e-07,
+ "loss": 0.4341,
+ "step": 2865
+ },
+ {
+ "epoch": 2.866806289268857,
+ "grad_norm": 0.4289715886116028,
+ "learning_rate": 9.413612045844234e-07,
+ "loss": 0.5025,
+ "step": 2866
+ },
+ {
+ "epoch": 2.8678065705979807,
+ "grad_norm": 0.48826637864112854,
+ "learning_rate": 9.270662953806186e-07,
+ "loss": 0.6558,
+ "step": 2867
+ },
+ {
+ "epoch": 2.8688068519271046,
+ "grad_norm": 0.48484766483306885,
+ "learning_rate": 9.128802494136279e-07,
+ "loss": 0.7263,
+ "step": 2868
+ },
+ {
+ "epoch": 2.869807133256228,
+ "grad_norm": 0.5893267393112183,
+ "learning_rate": 8.988030822713822e-07,
+ "loss": 0.6991,
+ "step": 2869
+ },
+ {
+ "epoch": 2.870807414585352,
+ "grad_norm": 0.44123855233192444,
+ "learning_rate": 8.848348094221192e-07,
+ "loss": 0.4416,
+ "step": 2870
+ },
+ {
+ "epoch": 2.871807695914476,
+ "grad_norm": 0.5445358157157898,
+ "learning_rate": 8.709754462144615e-07,
+ "loss": 0.6728,
+ "step": 2871
+ },
+ {
+ "epoch": 2.8728079772435997,
+ "grad_norm": 0.6016637086868286,
+ "learning_rate": 8.57225007877327e-07,
+ "loss": 0.7194,
+ "step": 2872
+ },
+ {
+ "epoch": 2.8738082585727236,
+ "grad_norm": 0.5043215155601501,
+ "learning_rate": 8.435835095199629e-07,
+ "loss": 0.5811,
+ "step": 2873
+ },
+ {
+ "epoch": 2.8748085399018475,
+ "grad_norm": 0.4707871675491333,
+ "learning_rate": 8.300509661319234e-07,
+ "loss": 0.5813,
+ "step": 2874
+ },
+ {
+ "epoch": 2.8758088212309714,
+ "grad_norm": 0.535853385925293,
+ "learning_rate": 8.166273925830137e-07,
+ "loss": 0.6069,
+ "step": 2875
+ },
+ {
+ "epoch": 2.8768091025600953,
+ "grad_norm": 0.5644518136978149,
+ "learning_rate": 8.033128036233129e-07,
+ "loss": 0.7227,
+ "step": 2876
+ },
+ {
+ "epoch": 2.8778093838892187,
+ "grad_norm": 0.47965219616889954,
+ "learning_rate": 7.901072138831511e-07,
+ "loss": 0.4989,
+ "step": 2877
+ },
+ {
+ "epoch": 2.8788096652183426,
+ "grad_norm": 0.5448580384254456,
+ "learning_rate": 7.77010637873088e-07,
+ "loss": 0.7295,
+ "step": 2878
+ },
+ {
+ "epoch": 2.8798099465474665,
+ "grad_norm": 0.5140947699546814,
+ "learning_rate": 7.640230899838785e-07,
+ "loss": 0.659,
+ "step": 2879
+ },
+ {
+ "epoch": 2.8808102278765904,
+ "grad_norm": 0.5335779786109924,
+ "learning_rate": 7.511445844864962e-07,
+ "loss": 0.5623,
+ "step": 2880
+ },
+ {
+ "epoch": 2.8818105092057142,
+ "grad_norm": 0.45926329493522644,
+ "learning_rate": 7.383751355320989e-07,
+ "loss": 0.6922,
+ "step": 2881
+ },
+ {
+ "epoch": 2.8828107905348377,
+ "grad_norm": 0.5175126194953918,
+ "learning_rate": 7.257147571519851e-07,
+ "loss": 0.6969,
+ "step": 2882
+ },
+ {
+ "epoch": 2.8838110718639616,
+ "grad_norm": 0.5154538154602051,
+ "learning_rate": 7.131634632576267e-07,
+ "loss": 0.6349,
+ "step": 2883
+ },
+ {
+ "epoch": 2.8848113531930855,
+ "grad_norm": 0.5200253129005432,
+ "learning_rate": 7.00721267640625e-07,
+ "loss": 0.7039,
+ "step": 2884
+ },
+ {
+ "epoch": 2.8858116345222093,
+ "grad_norm": 0.4848579466342926,
+ "learning_rate": 6.883881839727103e-07,
+ "loss": 0.5744,
+ "step": 2885
+ },
+ {
+ "epoch": 2.8868119158513332,
+ "grad_norm": 0.5093511343002319,
+ "learning_rate": 6.761642258056978e-07,
+ "loss": 0.5857,
+ "step": 2886
+ },
+ {
+ "epoch": 2.887812197180457,
+ "grad_norm": 0.5417166948318481,
+ "learning_rate": 6.640494065715209e-07,
+ "loss": 0.6291,
+ "step": 2887
+ },
+ {
+ "epoch": 2.888812478509581,
+ "grad_norm": 0.4646155536174774,
+ "learning_rate": 6.520437395821755e-07,
+ "loss": 0.5617,
+ "step": 2888
+ },
+ {
+ "epoch": 2.889812759838705,
+ "grad_norm": 0.40162429213523865,
+ "learning_rate": 6.401472380297091e-07,
+ "loss": 0.5889,
+ "step": 2889
+ },
+ {
+ "epoch": 2.8908130411678283,
+ "grad_norm": 0.47493594884872437,
+ "learning_rate": 6.283599149862207e-07,
+ "loss": 0.5754,
+ "step": 2890
+ },
+ {
+ "epoch": 2.891813322496952,
+ "grad_norm": 0.43852469325065613,
+ "learning_rate": 6.166817834038607e-07,
+ "loss": 0.537,
+ "step": 2891
+ },
+ {
+ "epoch": 2.892813603826076,
+ "grad_norm": 0.5104988217353821,
+ "learning_rate": 6.051128561147756e-07,
+ "loss": 0.5929,
+ "step": 2892
+ },
+ {
+ "epoch": 2.8938138851552,
+ "grad_norm": 0.49341636896133423,
+ "learning_rate": 5.93653145831119e-07,
+ "loss": 0.742,
+ "step": 2893
+ },
+ {
+ "epoch": 2.894814166484324,
+ "grad_norm": 0.5017572641372681,
+ "learning_rate": 5.823026651450625e-07,
+ "loss": 0.5905,
+ "step": 2894
+ },
+ {
+ "epoch": 2.8958144478134473,
+ "grad_norm": 0.4559513032436371,
+ "learning_rate": 5.710614265287073e-07,
+ "loss": 0.4846,
+ "step": 2895
+ },
+ {
+ "epoch": 2.896814729142571,
+ "grad_norm": 0.4502454996109009,
+ "learning_rate": 5.599294423341506e-07,
+ "loss": 0.6107,
+ "step": 2896
+ },
+ {
+ "epoch": 2.897815010471695,
+ "grad_norm": 0.4951443374156952,
+ "learning_rate": 5.489067247934298e-07,
+ "loss": 0.5424,
+ "step": 2897
+ },
+ {
+ "epoch": 2.898815291800819,
+ "grad_norm": 0.42103248834609985,
+ "learning_rate": 5.379932860185122e-07,
+ "loss": 0.4246,
+ "step": 2898
+ },
+ {
+ "epoch": 2.899815573129943,
+ "grad_norm": 0.5300295948982239,
+ "learning_rate": 5.271891380013161e-07,
+ "loss": 0.5921,
+ "step": 2899
+ },
+ {
+ "epoch": 2.9008158544590668,
+ "grad_norm": 0.49473997950553894,
+ "learning_rate": 5.164942926136118e-07,
+ "loss": 0.5782,
+ "step": 2900
+ },
+ {
+ "epoch": 2.9018161357881906,
+ "grad_norm": 0.5441948771476746,
+ "learning_rate": 5.059087616071211e-07,
+ "loss": 0.4919,
+ "step": 2901
+ },
+ {
+ "epoch": 2.902816417117314,
+ "grad_norm": 0.4561871588230133,
+ "learning_rate": 4.954325566134177e-07,
+ "loss": 0.5772,
+ "step": 2902
+ },
+ {
+ "epoch": 2.903816698446438,
+ "grad_norm": 0.48668912053108215,
+ "learning_rate": 4.85065689143982e-07,
+ "loss": 0.6259,
+ "step": 2903
+ },
+ {
+ "epoch": 2.904816979775562,
+ "grad_norm": 0.44714733958244324,
+ "learning_rate": 4.748081705900909e-07,
+ "loss": 0.5417,
+ "step": 2904
+ },
+ {
+ "epoch": 2.9058172611046857,
+ "grad_norm": 0.4633501172065735,
+ "learning_rate": 4.646600122229283e-07,
+ "loss": 0.6534,
+ "step": 2905
+ },
+ {
+ "epoch": 2.9068175424338096,
+ "grad_norm": 0.43328484892845154,
+ "learning_rate": 4.546212251934745e-07,
+ "loss": 0.6157,
+ "step": 2906
+ },
+ {
+ "epoch": 2.907817823762933,
+ "grad_norm": 0.4951692819595337,
+ "learning_rate": 4.44691820532539e-07,
+ "loss": 0.6377,
+ "step": 2907
+ },
+ {
+ "epoch": 2.908818105092057,
+ "grad_norm": 0.4766996502876282,
+ "learning_rate": 4.3487180915074976e-07,
+ "loss": 0.584,
+ "step": 2908
+ },
+ {
+ "epoch": 2.909818386421181,
+ "grad_norm": 0.4883562922477722,
+ "learning_rate": 4.251612018385087e-07,
+ "loss": 0.6823,
+ "step": 2909
+ },
+ {
+ "epoch": 2.9108186677503047,
+ "grad_norm": 0.4609594941139221,
+ "learning_rate": 4.155600092660361e-07,
+ "loss": 0.5606,
+ "step": 2910
+ },
+ {
+ "epoch": 2.9118189490794286,
+ "grad_norm": 0.4867727756500244,
+ "learning_rate": 4.0606824198329287e-07,
+ "loss": 0.5412,
+ "step": 2911
+ },
+ {
+ "epoch": 2.9128192304085525,
+ "grad_norm": 0.5095967054367065,
+ "learning_rate": 3.9668591042002487e-07,
+ "loss": 0.6622,
+ "step": 2912
+ },
+ {
+ "epoch": 2.9138195117376764,
+ "grad_norm": 0.49293196201324463,
+ "learning_rate": 3.8741302488570774e-07,
+ "loss": 0.5913,
+ "step": 2913
+ },
+ {
+ "epoch": 2.9148197930668003,
+ "grad_norm": 0.4758484959602356,
+ "learning_rate": 3.782495955695686e-07,
+ "loss": 0.6069,
+ "step": 2914
+ },
+ {
+ "epoch": 2.9158200743959237,
+ "grad_norm": 0.509536862373352,
+ "learning_rate": 3.6919563254056434e-07,
+ "loss": 0.7027,
+ "step": 2915
+ },
+ {
+ "epoch": 2.9168203557250476,
+ "grad_norm": 0.4377104938030243,
+ "learning_rate": 3.6025114574734785e-07,
+ "loss": 0.5351,
+ "step": 2916
+ },
+ {
+ "epoch": 2.9178206370541715,
+ "grad_norm": 0.4997771680355072,
+ "learning_rate": 3.514161450183129e-07,
+ "loss": 0.5461,
+ "step": 2917
+ },
+ {
+ "epoch": 2.9188209183832954,
+ "grad_norm": 0.5173505544662476,
+ "learning_rate": 3.42690640061516e-07,
+ "loss": 0.5791,
+ "step": 2918
+ },
+ {
+ "epoch": 2.9198211997124193,
+ "grad_norm": 0.45158660411834717,
+ "learning_rate": 3.3407464046470993e-07,
+ "loss": 0.5432,
+ "step": 2919
+ },
+ {
+ "epoch": 2.9208214810415427,
+ "grad_norm": 0.43320661783218384,
+ "learning_rate": 3.255681556953216e-07,
+ "loss": 0.5355,
+ "step": 2920
+ },
+ {
+ "epoch": 2.9218217623706666,
+ "grad_norm": 0.4965643882751465,
+ "learning_rate": 3.171711951004408e-07,
+ "loss": 0.6328,
+ "step": 2921
+ },
+ {
+ "epoch": 2.9228220436997905,
+ "grad_norm": 0.5545138716697693,
+ "learning_rate": 3.0888376790679795e-07,
+ "loss": 0.6382,
+ "step": 2922
+ },
+ {
+ "epoch": 2.9238223250289144,
+ "grad_norm": 0.4653552174568176,
+ "learning_rate": 3.007058832207976e-07,
+ "loss": 0.6034,
+ "step": 2923
+ },
+ {
+ "epoch": 2.9248226063580383,
+ "grad_norm": 0.5603210926055908,
+ "learning_rate": 2.926375500284406e-07,
+ "loss": 0.5383,
+ "step": 2924
+ },
+ {
+ "epoch": 2.925822887687162,
+ "grad_norm": 0.4722922742366791,
+ "learning_rate": 2.846787771953574e-07,
+ "loss": 0.6639,
+ "step": 2925
+ },
+ {
+ "epoch": 2.926823169016286,
+ "grad_norm": 0.5118352174758911,
+ "learning_rate": 2.7682957346683026e-07,
+ "loss": 0.6342,
+ "step": 2926
+ },
+ {
+ "epoch": 2.92782345034541,
+ "grad_norm": 0.45839202404022217,
+ "learning_rate": 2.6908994746768226e-07,
+ "loss": 0.6281,
+ "step": 2927
+ },
+ {
+ "epoch": 2.9288237316745334,
+ "grad_norm": 0.4275728464126587,
+ "learning_rate": 2.6145990770238825e-07,
+ "loss": 0.5391,
+ "step": 2928
+ },
+ {
+ "epoch": 2.9298240130036572,
+ "grad_norm": 0.4813961982727051,
+ "learning_rate": 2.539394625549529e-07,
+ "loss": 0.5056,
+ "step": 2929
+ },
+ {
+ "epoch": 2.930824294332781,
+ "grad_norm": 0.48714709281921387,
+ "learning_rate": 2.4652862028902156e-07,
+ "loss": 0.5696,
+ "step": 2930
+ },
+ {
+ "epoch": 2.931824575661905,
+ "grad_norm": 0.4456583857536316,
+ "learning_rate": 2.392273890477359e-07,
+ "loss": 0.4824,
+ "step": 2931
+ },
+ {
+ "epoch": 2.9328248569910285,
+ "grad_norm": 0.47101256251335144,
+ "learning_rate": 2.3203577685385612e-07,
+ "loss": 0.6102,
+ "step": 2932
+ },
+ {
+ "epoch": 2.9338251383201523,
+ "grad_norm": 0.5012255907058716,
+ "learning_rate": 2.2495379160963891e-07,
+ "loss": 0.7072,
+ "step": 2933
+ },
+ {
+ "epoch": 2.9348254196492762,
+ "grad_norm": 0.5074737668037415,
+ "learning_rate": 2.179814410969261e-07,
+ "loss": 0.514,
+ "step": 2934
+ },
+ {
+ "epoch": 2.9358257009784,
+ "grad_norm": 0.483468621969223,
+ "learning_rate": 2.1111873297706696e-07,
+ "loss": 0.5138,
+ "step": 2935
+ },
+ {
+ "epoch": 2.936825982307524,
+ "grad_norm": 0.5193675756454468,
+ "learning_rate": 2.043656747909184e-07,
+ "loss": 0.7277,
+ "step": 2936
+ },
+ {
+ "epoch": 2.937826263636648,
+ "grad_norm": 0.555354118347168,
+ "learning_rate": 1.977222739588891e-07,
+ "loss": 0.6055,
+ "step": 2937
+ },
+ {
+ "epoch": 2.938826544965772,
+ "grad_norm": 0.4599333107471466,
+ "learning_rate": 1.9118853778086199e-07,
+ "loss": 0.5888,
+ "step": 2938
+ },
+ {
+ "epoch": 2.9398268262948957,
+ "grad_norm": 0.5660957098007202,
+ "learning_rate": 1.847644734362497e-07,
+ "loss": 0.5688,
+ "step": 2939
+ },
+ {
+ "epoch": 2.940827107624019,
+ "grad_norm": 0.548714280128479,
+ "learning_rate": 1.7845008798391682e-07,
+ "loss": 0.7159,
+ "step": 2940
+ },
+ {
+ "epoch": 2.941827388953143,
+ "grad_norm": 0.4457051455974579,
+ "learning_rate": 1.7224538836223546e-07,
+ "loss": 0.5161,
+ "step": 2941
+ },
+ {
+ "epoch": 2.942827670282267,
+ "grad_norm": 0.4860127568244934,
+ "learning_rate": 1.6615038138906303e-07,
+ "loss": 0.6364,
+ "step": 2942
+ },
+ {
+ "epoch": 2.9438279516113908,
+ "grad_norm": 0.5117607116699219,
+ "learning_rate": 1.6016507376169777e-07,
+ "loss": 0.629,
+ "step": 2943
+ },
+ {
+ "epoch": 2.9448282329405147,
+ "grad_norm": 0.5450369715690613,
+ "learning_rate": 1.5428947205690103e-07,
+ "loss": 0.6997,
+ "step": 2944
+ },
+ {
+ "epoch": 2.945828514269638,
+ "grad_norm": 0.45853081345558167,
+ "learning_rate": 1.4852358273091948e-07,
+ "loss": 0.6484,
+ "step": 2945
+ },
+ {
+ "epoch": 2.946828795598762,
+ "grad_norm": 0.4037773609161377,
+ "learning_rate": 1.4286741211940736e-07,
+ "loss": 0.4715,
+ "step": 2946
+ },
+ {
+ "epoch": 2.947829076927886,
+ "grad_norm": 0.4062212407588959,
+ "learning_rate": 1.3732096643747084e-07,
+ "loss": 0.5232,
+ "step": 2947
+ },
+ {
+ "epoch": 2.9488293582570098,
+ "grad_norm": 0.5271187424659729,
+ "learning_rate": 1.3188425177966813e-07,
+ "loss": 0.5761,
+ "step": 2948
+ },
+ {
+ "epoch": 2.9498296395861336,
+ "grad_norm": 0.4974057376384735,
+ "learning_rate": 1.2655727411994278e-07,
+ "loss": 0.575,
+ "step": 2949
+ },
+ {
+ "epoch": 2.9508299209152575,
+ "grad_norm": 0.4446798264980316,
+ "learning_rate": 1.2134003931169035e-07,
+ "loss": 0.664,
+ "step": 2950
+ },
+ {
+ "epoch": 2.9518302022443814,
+ "grad_norm": 0.5552793741226196,
+ "learning_rate": 1.1623255308772507e-07,
+ "loss": 0.4794,
+ "step": 2951
+ },
+ {
+ "epoch": 2.9528304835735053,
+ "grad_norm": 0.5090262293815613,
+ "learning_rate": 1.1123482106021322e-07,
+ "loss": 0.6131,
+ "step": 2952
+ },
+ {
+ "epoch": 2.9538307649026287,
+ "grad_norm": 0.5115334987640381,
+ "learning_rate": 1.0634684872079526e-07,
+ "loss": 0.6142,
+ "step": 2953
+ },
+ {
+ "epoch": 2.9548310462317526,
+ "grad_norm": 0.47910353541374207,
+ "learning_rate": 1.0156864144044154e-07,
+ "loss": 0.5872,
+ "step": 2954
+ },
+ {
+ "epoch": 2.9558313275608765,
+ "grad_norm": 0.5572304129600525,
+ "learning_rate": 9.690020446956328e-08,
+ "loss": 0.7159,
+ "step": 2955
+ },
+ {
+ "epoch": 2.9568316088900004,
+ "grad_norm": 0.5390987396240234,
+ "learning_rate": 9.234154293790154e-08,
+ "loss": 0.6768,
+ "step": 2956
+ },
+ {
+ "epoch": 2.9578318902191243,
+ "grad_norm": 0.43942734599113464,
+ "learning_rate": 8.789266185461608e-08,
+ "loss": 0.5291,
+ "step": 2957
+ },
+ {
+ "epoch": 2.9588321715482477,
+ "grad_norm": 0.4815521836280823,
+ "learning_rate": 8.355356610822984e-08,
+ "loss": 0.5884,
+ "step": 2958
+ },
+ {
+ "epoch": 2.9598324528773716,
+ "grad_norm": 0.48746562004089355,
+ "learning_rate": 7.932426046660669e-08,
+ "loss": 0.4794,
+ "step": 2959
+ },
+ {
+ "epoch": 2.9608327342064955,
+ "grad_norm": 0.46092158555984497,
+ "learning_rate": 7.520474957699586e-08,
+ "loss": 0.4798,
+ "step": 2960
+ },
+ {
+ "epoch": 2.9618330155356194,
+ "grad_norm": 0.5099161267280579,
+ "learning_rate": 7.119503796599868e-08,
+ "loss": 0.4535,
+ "step": 2961
+ },
+ {
+ "epoch": 2.9628332968647433,
+ "grad_norm": 0.5089080333709717,
+ "learning_rate": 6.729513003955745e-08,
+ "loss": 0.568,
+ "step": 2962
+ },
+ {
+ "epoch": 2.963833578193867,
+ "grad_norm": 0.49719443917274475,
+ "learning_rate": 6.350503008296648e-08,
+ "loss": 0.7387,
+ "step": 2963
+ },
+ {
+ "epoch": 2.964833859522991,
+ "grad_norm": 0.554434061050415,
+ "learning_rate": 5.98247422608722e-08,
+ "loss": 0.69,
+ "step": 2964
+ },
+ {
+ "epoch": 2.9658341408521145,
+ "grad_norm": 0.4781286120414734,
+ "learning_rate": 5.6254270617228656e-08,
+ "loss": 0.5632,
+ "step": 2965
+ },
+ {
+ "epoch": 2.9668344221812384,
+ "grad_norm": 0.5109730958938599,
+ "learning_rate": 5.279361907534197e-08,
+ "loss": 0.5214,
+ "step": 2966
+ },
+ {
+ "epoch": 2.9678347035103623,
+ "grad_norm": 0.49638715386390686,
+ "learning_rate": 4.9442791437848136e-08,
+ "loss": 0.5354,
+ "step": 2967
+ },
+ {
+ "epoch": 2.968834984839486,
+ "grad_norm": 0.43995532393455505,
+ "learning_rate": 4.620179138670189e-08,
+ "loss": 0.6034,
+ "step": 2968
+ },
+ {
+ "epoch": 2.96983526616861,
+ "grad_norm": 0.4875425696372986,
+ "learning_rate": 4.3070622483165627e-08,
+ "loss": 0.5358,
+ "step": 2969
+ },
+ {
+ "epoch": 2.9708355474977335,
+ "grad_norm": 0.5161439776420593,
+ "learning_rate": 4.0049288167842705e-08,
+ "loss": 0.6613,
+ "step": 2970
+ },
+ {
+ "epoch": 2.9718358288268574,
+ "grad_norm": 0.5133379697799683,
+ "learning_rate": 3.713779176061083e-08,
+ "loss": 0.6611,
+ "step": 2971
+ },
+ {
+ "epoch": 2.9728361101559813,
+ "grad_norm": 0.4583068788051605,
+ "learning_rate": 3.433613646069977e-08,
+ "loss": 0.4999,
+ "step": 2972
+ },
+ {
+ "epoch": 2.973836391485105,
+ "grad_norm": 0.5073477029800415,
+ "learning_rate": 3.164432534662476e-08,
+ "loss": 0.6013,
+ "step": 2973
+ },
+ {
+ "epoch": 2.974836672814229,
+ "grad_norm": 0.5013384222984314,
+ "learning_rate": 2.906236137617535e-08,
+ "loss": 0.6297,
+ "step": 2974
+ },
+ {
+ "epoch": 2.975836954143353,
+ "grad_norm": 0.4707469940185547,
+ "learning_rate": 2.659024738648208e-08,
+ "loss": 0.4792,
+ "step": 2975
+ },
+ {
+ "epoch": 2.976837235472477,
+ "grad_norm": 0.45535582304000854,
+ "learning_rate": 2.4227986093938726e-08,
+ "loss": 0.5529,
+ "step": 2976
+ },
+ {
+ "epoch": 2.9778375168016007,
+ "grad_norm": 0.4660503566265106,
+ "learning_rate": 2.197558009425782e-08,
+ "loss": 0.7177,
+ "step": 2977
+ },
+ {
+ "epoch": 2.978837798130724,
+ "grad_norm": 0.5447966456413269,
+ "learning_rate": 1.983303186241514e-08,
+ "loss": 0.7163,
+ "step": 2978
+ },
+ {
+ "epoch": 2.979838079459848,
+ "grad_norm": 0.5137467980384827,
+ "learning_rate": 1.7800343752683023e-08,
+ "loss": 0.6208,
+ "step": 2979
+ },
+ {
+ "epoch": 2.980838360788972,
+ "grad_norm": 0.48889538645744324,
+ "learning_rate": 1.5877517998630355e-08,
+ "loss": 0.6677,
+ "step": 2980
+ },
+ {
+ "epoch": 2.981838642118096,
+ "grad_norm": 0.5079302191734314,
+ "learning_rate": 1.4064556713089261e-08,
+ "loss": 0.5981,
+ "step": 2981
+ },
+ {
+ "epoch": 2.9828389234472197,
+ "grad_norm": 0.5454959869384766,
+ "learning_rate": 1.2361461888166226e-08,
+ "loss": 0.6449,
+ "step": 2982
+ },
+ {
+ "epoch": 2.983839204776343,
+ "grad_norm": 0.49543872475624084,
+ "learning_rate": 1.0768235395264282e-08,
+ "loss": 0.5868,
+ "step": 2983
+ },
+ {
+ "epoch": 2.984839486105467,
+ "grad_norm": 0.4402853548526764,
+ "learning_rate": 9.284878985038604e-09,
+ "loss": 0.4754,
+ "step": 2984
+ },
+ {
+ "epoch": 2.985839767434591,
+ "grad_norm": 0.5035153031349182,
+ "learning_rate": 7.911394287452023e-09,
+ "loss": 0.5637,
+ "step": 2985
+ },
+ {
+ "epoch": 2.986840048763715,
+ "grad_norm": 0.4882700443267822,
+ "learning_rate": 6.647782811697312e-09,
+ "loss": 0.6263,
+ "step": 2986
+ },
+ {
+ "epoch": 2.9878403300928387,
+ "grad_norm": 0.4206259548664093,
+ "learning_rate": 5.494045946263793e-09,
+ "loss": 0.6474,
+ "step": 2987
+ },
+ {
+ "epoch": 2.9888406114219626,
+ "grad_norm": 0.5478120446205139,
+ "learning_rate": 4.4501849589040355e-09,
+ "loss": 0.6404,
+ "step": 2988
+ },
+ {
+ "epoch": 2.9898408927510864,
+ "grad_norm": 0.4501127302646637,
+ "learning_rate": 3.5162009966227537e-09,
+ "loss": 0.5409,
+ "step": 2989
+ },
+ {
+ "epoch": 2.9908411740802103,
+ "grad_norm": 0.4742661416530609,
+ "learning_rate": 2.692095085699009e-09,
+ "loss": 0.6942,
+ "step": 2990
+ },
+ {
+ "epoch": 2.9918414554093338,
+ "grad_norm": 0.5313809514045715,
+ "learning_rate": 1.977868131675109e-09,
+ "loss": 0.5464,
+ "step": 2991
+ },
+ {
+ "epoch": 2.9928417367384577,
+ "grad_norm": 0.40790101885795593,
+ "learning_rate": 1.3735209193677102e-09,
+ "loss": 0.4489,
+ "step": 2992
+ },
+ {
+ "epoch": 2.9938420180675815,
+ "grad_norm": 0.4673601984977722,
+ "learning_rate": 8.790541128345098e-10,
+ "loss": 0.6204,
+ "step": 2993
+ },
+ {
+ "epoch": 2.9948422993967054,
+ "grad_norm": 0.5095663666725159,
+ "learning_rate": 4.944682554075542e-10,
+ "loss": 0.5743,
+ "step": 2994
+ },
+ {
+ "epoch": 2.995842580725829,
+ "grad_norm": 0.538153350353241,
+ "learning_rate": 2.1976376969323753e-10,
+ "loss": 0.6203,
+ "step": 2995
+ },
+ {
+ "epoch": 2.9968428620549528,
+ "grad_norm": 0.4830429255962372,
+ "learning_rate": 5.4940957516791404e-11,
+ "loss": 0.5864,
+ "step": 2996
+ },
+ {
+ "epoch": 2.9978431433840766,
+ "grad_norm": 0.5563502907752991,
+ "learning_rate": 0.0,
+ "loss": 0.6332,
+ "step": 2997
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 2997,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.9389665380417536e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-2997/training_args.bin b/checkpoint-2997/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f5db54c03213459099040d14f85829b6aeb0666
--- /dev/null
+++ b/checkpoint-2997/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb43d78443117126e44061cb7a0c1f9a5c40f27f7bf1d5cd0232587a4334407
+size 5304
diff --git a/checkpoint-500/config.json b/checkpoint-500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..06ec1df58f28234ccce22a5325e108ece94f0078
--- /dev/null
+++ b/checkpoint-500/config.json
@@ -0,0 +1,34 @@
+{
+ "_name_or_path": "facebook/nllb-200-3.3B",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "M2M100ForConditionalGeneration"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 0,
+ "d_model": 2048,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 8192,
+ "decoder_layerdrop": 0,
+ "decoder_layers": 24,
+ "decoder_start_token_id": 2,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 8192,
+ "encoder_layerdrop": 0,
+ "encoder_layers": 24,
+ "eos_token_id": 2,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 200,
+ "max_position_embeddings": 1024,
+ "model_type": "m2m_100",
+ "num_hidden_layers": 24,
+ "pad_token_id": 1,
+ "scale_embedding": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.43.2",
+ "use_cache": true,
+ "vocab_size": 256206
+}
diff --git a/checkpoint-500/generation_config.json b/checkpoint-500/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..402a1a43d1af8c080466b8139184b4e5b7f3f47c
--- /dev/null
+++ b/checkpoint-500/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "decoder_start_token_id": 2,
+ "eos_token_id": 2,
+ "max_length": 200,
+ "pad_token_id": 1,
+ "transformers_version": "4.43.2"
+}
diff --git a/checkpoint-500/model-00001-of-00003.safetensors b/checkpoint-500/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d4ea6bf6304149e4d3e3caf5471adb01f1bb7a97
--- /dev/null
+++ b/checkpoint-500/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1322fc5e714165adc5003b78fb7a3e4d610003a87d65b681979016c1eed3bb19
+size 4986088344
diff --git a/checkpoint-500/model-00002-of-00003.safetensors b/checkpoint-500/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0b615a793d0825c291b30f1d671e122e11453a1a
--- /dev/null
+++ b/checkpoint-500/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aab201ea7fa73e0a69d25af44197d5e4703dde7643fb8512088ab26e20e7c294
+size 4985688360
diff --git a/checkpoint-500/model-00003-of-00003.safetensors b/checkpoint-500/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7d2f59510a26f3891f71ad05138b75ebe343d4be
--- /dev/null
+++ b/checkpoint-500/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca63e9149d65cfd0366ae273ad0891d029a708f8a1e4ce7913e49ba09c136bab
+size 3407796744
diff --git a/checkpoint-500/model.safetensors.index.json b/checkpoint-500/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..66f5db79b23230955de24502c00adc6525edbdfc
--- /dev/null
+++ b/checkpoint-500/model.safetensors.index.json
@@ -0,0 +1,1020 @@
+{
+ "metadata": {
+ "total_size": 13379452928
+ },
+ "weight_map": {
+ "model.decoder.layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.0.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.1.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.10.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.11.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.12.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.13.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.14.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.15.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.16.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.17.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.18.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.19.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.2.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.20.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.21.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.22.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.encoder_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc1.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.fc2.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.final_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.bias": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.23.self_attn_layer_norm.weight": "model-00003-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.3.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.4.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.5.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.6.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.7.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.8.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.encoder_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.decoder.layers.9.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.15.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.16.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.17.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.18.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.19.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.20.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.21.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.22.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc1.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.fc2.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.final_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.bias": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.23.self_attn_layer_norm.weight": "model-00002-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc1.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.fc2.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00003.safetensors",
+ "model.encoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00003.safetensors",
+ "model.shared.weight": "model-00001-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-500/optimizer.pt b/checkpoint-500/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..55b006551b178dab3388b361dae402818f27f36d
--- /dev/null
+++ b/checkpoint-500/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ca7bc98982fe0dfa28c1099c37409b03eab98a654bf7215c7b77d6cc08c9356
+size 16695613
diff --git a/checkpoint-500/rng_state.pth b/checkpoint-500/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1585c779a582598854cae7f7d7fcb528ca25004f
--- /dev/null
+++ b/checkpoint-500/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aeaa28e985178872577ac5bd4330c4d8cf9ae42ac066242cb31778b01ff260ec
+size 14244
diff --git a/checkpoint-500/scheduler.pt b/checkpoint-500/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9cee5592ce4571aaf187e5914e2a5061190c309f
--- /dev/null
+++ b/checkpoint-500/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc10672965fd5a4faf482f9128558b0f5647fbb1b90caeb7d1201dc785e0b337
+size 1064
diff --git a/checkpoint-500/sentencepiece.bpe.model b/checkpoint-500/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..dc2262d3e1d375b235eb71c24119c8e73f85d4ad
--- /dev/null
+++ b/checkpoint-500/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
+size 4852054
diff --git a/checkpoint-500/special_tokens_map.json b/checkpoint-500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..770c6f4e25faf27bbc3878b806f2ecfb88c5169e
--- /dev/null
+++ b/checkpoint-500/special_tokens_map.json
@@ -0,0 +1,255 @@
+{
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-500/tokenizer.json b/checkpoint-500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..98050e98b98364c06d83b3f41864076220cb8408
--- /dev/null
+++ b/checkpoint-500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b39b25b0763a1dd69dec54081fafcf10770d9f2538a3bd975a0c4be6d60a9c2
+size 17331294
diff --git a/checkpoint-500/tokenizer_config.json b/checkpoint-500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f1424d3657c008568198b44be241646482e7e9f2
--- /dev/null
+++ b/checkpoint-500/tokenizer_config.json
@@ -0,0 +1,1878 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256001": {
+ "content": "ace_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256002": {
+ "content": "ace_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256003": {
+ "content": "acm_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256004": {
+ "content": "acq_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256005": {
+ "content": "aeb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256006": {
+ "content": "afr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256007": {
+ "content": "ajp_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256008": {
+ "content": "aka_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256009": {
+ "content": "amh_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256010": {
+ "content": "apc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256011": {
+ "content": "arb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256012": {
+ "content": "ars_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256013": {
+ "content": "ary_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256014": {
+ "content": "arz_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256015": {
+ "content": "asm_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256016": {
+ "content": "ast_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256017": {
+ "content": "awa_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256018": {
+ "content": "ayr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256019": {
+ "content": "azb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256020": {
+ "content": "azj_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256021": {
+ "content": "bak_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256022": {
+ "content": "bam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256023": {
+ "content": "ban_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256024": {
+ "content": "bel_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256025": {
+ "content": "bem_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256026": {
+ "content": "ben_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256027": {
+ "content": "bho_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256028": {
+ "content": "bjn_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256029": {
+ "content": "bjn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256030": {
+ "content": "bod_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256031": {
+ "content": "bos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256032": {
+ "content": "bug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256033": {
+ "content": "bul_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256034": {
+ "content": "cat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256035": {
+ "content": "ceb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256036": {
+ "content": "ces_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256037": {
+ "content": "cjk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256038": {
+ "content": "ckb_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256039": {
+ "content": "crh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256040": {
+ "content": "cym_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256041": {
+ "content": "dan_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256042": {
+ "content": "deu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256043": {
+ "content": "dik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256044": {
+ "content": "dyu_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256045": {
+ "content": "dzo_Tibt",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256046": {
+ "content": "ell_Grek",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256047": {
+ "content": "eng_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256048": {
+ "content": "epo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256049": {
+ "content": "est_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256050": {
+ "content": "eus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256051": {
+ "content": "ewe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256052": {
+ "content": "fao_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256053": {
+ "content": "pes_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256054": {
+ "content": "fij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256055": {
+ "content": "fin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256056": {
+ "content": "fon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256057": {
+ "content": "fra_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256058": {
+ "content": "fur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256059": {
+ "content": "fuv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256060": {
+ "content": "gla_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256061": {
+ "content": "gle_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256062": {
+ "content": "glg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256063": {
+ "content": "grn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256064": {
+ "content": "guj_Gujr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256065": {
+ "content": "hat_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256066": {
+ "content": "hau_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256067": {
+ "content": "heb_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256068": {
+ "content": "hin_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256069": {
+ "content": "hne_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256070": {
+ "content": "hrv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256071": {
+ "content": "hun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256072": {
+ "content": "hye_Armn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256073": {
+ "content": "ibo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256074": {
+ "content": "ilo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256075": {
+ "content": "ind_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256076": {
+ "content": "isl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256077": {
+ "content": "ita_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256078": {
+ "content": "jav_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256079": {
+ "content": "jpn_Jpan",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256080": {
+ "content": "kab_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256081": {
+ "content": "kac_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256082": {
+ "content": "kam_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256083": {
+ "content": "kan_Knda",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256084": {
+ "content": "kas_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256085": {
+ "content": "kas_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256086": {
+ "content": "kat_Geor",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256087": {
+ "content": "knc_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256088": {
+ "content": "knc_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256089": {
+ "content": "kaz_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256090": {
+ "content": "kbp_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256091": {
+ "content": "kea_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256092": {
+ "content": "khm_Khmr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256093": {
+ "content": "kik_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256094": {
+ "content": "kin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256095": {
+ "content": "kir_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256096": {
+ "content": "kmb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256097": {
+ "content": "kon_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256098": {
+ "content": "kor_Hang",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256099": {
+ "content": "kmr_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256100": {
+ "content": "lao_Laoo",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256101": {
+ "content": "lvs_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256102": {
+ "content": "lij_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256103": {
+ "content": "lim_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256104": {
+ "content": "lin_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256105": {
+ "content": "lit_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256106": {
+ "content": "lmo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256107": {
+ "content": "ltg_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256108": {
+ "content": "ltz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256109": {
+ "content": "lua_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256110": {
+ "content": "lug_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256111": {
+ "content": "luo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256112": {
+ "content": "lus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256113": {
+ "content": "mag_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256114": {
+ "content": "mai_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256115": {
+ "content": "mal_Mlym",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256116": {
+ "content": "mar_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256117": {
+ "content": "min_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256118": {
+ "content": "mkd_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256119": {
+ "content": "plt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256120": {
+ "content": "mlt_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256121": {
+ "content": "mni_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256122": {
+ "content": "khk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256123": {
+ "content": "mos_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256124": {
+ "content": "mri_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256125": {
+ "content": "zsm_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256126": {
+ "content": "mya_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256127": {
+ "content": "nld_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256128": {
+ "content": "nno_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256129": {
+ "content": "nob_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256130": {
+ "content": "npi_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256131": {
+ "content": "nso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256132": {
+ "content": "nus_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256133": {
+ "content": "nya_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256134": {
+ "content": "oci_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256135": {
+ "content": "gaz_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256136": {
+ "content": "ory_Orya",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256137": {
+ "content": "pag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256138": {
+ "content": "pan_Guru",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256139": {
+ "content": "pap_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256140": {
+ "content": "pol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256141": {
+ "content": "por_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256142": {
+ "content": "prs_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256143": {
+ "content": "pbt_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256144": {
+ "content": "quy_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256145": {
+ "content": "ron_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256146": {
+ "content": "run_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256147": {
+ "content": "rus_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256148": {
+ "content": "sag_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256149": {
+ "content": "san_Deva",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256150": {
+ "content": "sat_Beng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256151": {
+ "content": "scn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256152": {
+ "content": "shn_Mymr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256153": {
+ "content": "sin_Sinh",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256154": {
+ "content": "slk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256155": {
+ "content": "slv_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256156": {
+ "content": "smo_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256157": {
+ "content": "sna_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256158": {
+ "content": "snd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256159": {
+ "content": "som_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256160": {
+ "content": "sot_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256161": {
+ "content": "spa_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256162": {
+ "content": "als_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256163": {
+ "content": "srd_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256164": {
+ "content": "srp_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256165": {
+ "content": "ssw_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256166": {
+ "content": "sun_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256167": {
+ "content": "swe_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256168": {
+ "content": "swh_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256169": {
+ "content": "szl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256170": {
+ "content": "tam_Taml",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256171": {
+ "content": "tat_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256172": {
+ "content": "tel_Telu",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256173": {
+ "content": "tgk_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256174": {
+ "content": "tgl_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256175": {
+ "content": "tha_Thai",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256176": {
+ "content": "tir_Ethi",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256177": {
+ "content": "taq_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256178": {
+ "content": "taq_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256179": {
+ "content": "tpi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256180": {
+ "content": "tsn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256181": {
+ "content": "tso_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256182": {
+ "content": "tuk_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256183": {
+ "content": "tum_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256184": {
+ "content": "tur_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256185": {
+ "content": "twi_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256186": {
+ "content": "tzm_Tfng",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256187": {
+ "content": "uig_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256188": {
+ "content": "ukr_Cyrl",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256189": {
+ "content": "umb_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256190": {
+ "content": "urd_Arab",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256191": {
+ "content": "uzn_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256192": {
+ "content": "vec_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256193": {
+ "content": "vie_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256194": {
+ "content": "war_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256195": {
+ "content": "wol_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256196": {
+ "content": "xho_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256197": {
+ "content": "ydd_Hebr",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256198": {
+ "content": "yor_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256199": {
+ "content": "yue_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256200": {
+ "content": "zho_Hans",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256201": {
+ "content": "zho_Hant",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256202": {
+ "content": "zul_Latn",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "256203": {
+ "content": "",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "ace_Arab",
+ "ace_Latn",
+ "acm_Arab",
+ "acq_Arab",
+ "aeb_Arab",
+ "afr_Latn",
+ "ajp_Arab",
+ "aka_Latn",
+ "amh_Ethi",
+ "apc_Arab",
+ "arb_Arab",
+ "ars_Arab",
+ "ary_Arab",
+ "arz_Arab",
+ "asm_Beng",
+ "ast_Latn",
+ "awa_Deva",
+ "ayr_Latn",
+ "azb_Arab",
+ "azj_Latn",
+ "bak_Cyrl",
+ "bam_Latn",
+ "ban_Latn",
+ "bel_Cyrl",
+ "bem_Latn",
+ "ben_Beng",
+ "bho_Deva",
+ "bjn_Arab",
+ "bjn_Latn",
+ "bod_Tibt",
+ "bos_Latn",
+ "bug_Latn",
+ "bul_Cyrl",
+ "cat_Latn",
+ "ceb_Latn",
+ "ces_Latn",
+ "cjk_Latn",
+ "ckb_Arab",
+ "crh_Latn",
+ "cym_Latn",
+ "dan_Latn",
+ "deu_Latn",
+ "dik_Latn",
+ "dyu_Latn",
+ "dzo_Tibt",
+ "ell_Grek",
+ "eng_Latn",
+ "epo_Latn",
+ "est_Latn",
+ "eus_Latn",
+ "ewe_Latn",
+ "fao_Latn",
+ "pes_Arab",
+ "fij_Latn",
+ "fin_Latn",
+ "fon_Latn",
+ "fra_Latn",
+ "fur_Latn",
+ "fuv_Latn",
+ "gla_Latn",
+ "gle_Latn",
+ "glg_Latn",
+ "grn_Latn",
+ "guj_Gujr",
+ "hat_Latn",
+ "hau_Latn",
+ "heb_Hebr",
+ "hin_Deva",
+ "hne_Deva",
+ "hrv_Latn",
+ "hun_Latn",
+ "hye_Armn",
+ "ibo_Latn",
+ "ilo_Latn",
+ "ind_Latn",
+ "isl_Latn",
+ "ita_Latn",
+ "jav_Latn",
+ "jpn_Jpan",
+ "kab_Latn",
+ "kac_Latn",
+ "kam_Latn",
+ "kan_Knda",
+ "kas_Arab",
+ "kas_Deva",
+ "kat_Geor",
+ "knc_Arab",
+ "knc_Latn",
+ "kaz_Cyrl",
+ "kbp_Latn",
+ "kea_Latn",
+ "khm_Khmr",
+ "kik_Latn",
+ "kin_Latn",
+ "kir_Cyrl",
+ "kmb_Latn",
+ "kon_Latn",
+ "kor_Hang",
+ "kmr_Latn",
+ "lao_Laoo",
+ "lvs_Latn",
+ "lij_Latn",
+ "lim_Latn",
+ "lin_Latn",
+ "lit_Latn",
+ "lmo_Latn",
+ "ltg_Latn",
+ "ltz_Latn",
+ "lua_Latn",
+ "lug_Latn",
+ "luo_Latn",
+ "lus_Latn",
+ "mag_Deva",
+ "mai_Deva",
+ "mal_Mlym",
+ "mar_Deva",
+ "min_Latn",
+ "mkd_Cyrl",
+ "plt_Latn",
+ "mlt_Latn",
+ "mni_Beng",
+ "khk_Cyrl",
+ "mos_Latn",
+ "mri_Latn",
+ "zsm_Latn",
+ "mya_Mymr",
+ "nld_Latn",
+ "nno_Latn",
+ "nob_Latn",
+ "npi_Deva",
+ "nso_Latn",
+ "nus_Latn",
+ "nya_Latn",
+ "oci_Latn",
+ "gaz_Latn",
+ "ory_Orya",
+ "pag_Latn",
+ "pan_Guru",
+ "pap_Latn",
+ "pol_Latn",
+ "por_Latn",
+ "prs_Arab",
+ "pbt_Arab",
+ "quy_Latn",
+ "ron_Latn",
+ "run_Latn",
+ "rus_Cyrl",
+ "sag_Latn",
+ "san_Deva",
+ "sat_Beng",
+ "scn_Latn",
+ "shn_Mymr",
+ "sin_Sinh",
+ "slk_Latn",
+ "slv_Latn",
+ "smo_Latn",
+ "sna_Latn",
+ "snd_Arab",
+ "som_Latn",
+ "sot_Latn",
+ "spa_Latn",
+ "als_Latn",
+ "srd_Latn",
+ "srp_Cyrl",
+ "ssw_Latn",
+ "sun_Latn",
+ "swe_Latn",
+ "swh_Latn",
+ "szl_Latn",
+ "tam_Taml",
+ "tat_Cyrl",
+ "tel_Telu",
+ "tgk_Cyrl",
+ "tgl_Latn",
+ "tha_Thai",
+ "tir_Ethi",
+ "taq_Latn",
+ "taq_Tfng",
+ "tpi_Latn",
+ "tsn_Latn",
+ "tso_Latn",
+ "tuk_Latn",
+ "tum_Latn",
+ "tur_Latn",
+ "twi_Latn",
+ "tzm_Tfng",
+ "uig_Arab",
+ "ukr_Cyrl",
+ "umb_Latn",
+ "urd_Arab",
+ "uzn_Latn",
+ "vec_Latn",
+ "vie_Latn",
+ "war_Latn",
+ "wol_Latn",
+ "xho_Latn",
+ "ydd_Hebr",
+ "yor_Latn",
+ "yue_Hant",
+ "zho_Hans",
+ "zho_Hant",
+ "zul_Latn"
+ ],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "legacy_behaviour": false,
+ "mask_token": "",
+ "model_max_length": 1024,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "src_lang": "eng_Latn",
+ "tgt_lang": null,
+ "tokenizer_class": "NllbTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-500/trainer_state.json b/checkpoint-500/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..226503cddae7f347efe6a83de37503d640f4023a
--- /dev/null
+++ b/checkpoint-500/trainer_state.json
@@ -0,0 +1,3533 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5001406645619081,
+ "eval_steps": 500,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001000281329123816,
+ "grad_norm": 5.902005195617676,
+ "learning_rate": 0.0001999999450590425,
+ "loss": 3.1875,
+ "step": 1
+ },
+ {
+ "epoch": 0.002000562658247632,
+ "grad_norm": 3.2577760219573975,
+ "learning_rate": 0.00019999978023623033,
+ "loss": 2.3666,
+ "step": 2
+ },
+ {
+ "epoch": 0.003000843987371448,
+ "grad_norm": 5.3700995445251465,
+ "learning_rate": 0.0001999995055317446,
+ "loss": 2.8282,
+ "step": 3
+ },
+ {
+ "epoch": 0.004001125316495264,
+ "grad_norm": 2.1445534229278564,
+ "learning_rate": 0.00019999912094588717,
+ "loss": 2.2322,
+ "step": 4
+ },
+ {
+ "epoch": 0.005001406645619081,
+ "grad_norm": 1.5143821239471436,
+ "learning_rate": 0.00019999862647908064,
+ "loss": 2.1709,
+ "step": 5
+ },
+ {
+ "epoch": 0.006001687974742896,
+ "grad_norm": 2.0491714477539062,
+ "learning_rate": 0.00019999802213186834,
+ "loss": 2.2863,
+ "step": 6
+ },
+ {
+ "epoch": 0.007001969303866712,
+ "grad_norm": 1.2016857862472534,
+ "learning_rate": 0.0001999973079049143,
+ "loss": 1.5595,
+ "step": 7
+ },
+ {
+ "epoch": 0.008002250632990529,
+ "grad_norm": 1.3860406875610352,
+ "learning_rate": 0.00019999648379900338,
+ "loss": 1.7264,
+ "step": 8
+ },
+ {
+ "epoch": 0.009002531962114344,
+ "grad_norm": 1.0861930847167969,
+ "learning_rate": 0.0001999955498150411,
+ "loss": 2.0533,
+ "step": 9
+ },
+ {
+ "epoch": 0.010002813291238161,
+ "grad_norm": 2.233243703842163,
+ "learning_rate": 0.00019999450595405374,
+ "loss": 1.9378,
+ "step": 10
+ },
+ {
+ "epoch": 0.011003094620361977,
+ "grad_norm": 1.302808165550232,
+ "learning_rate": 0.0001999933522171883,
+ "loss": 1.9182,
+ "step": 11
+ },
+ {
+ "epoch": 0.012003375949485792,
+ "grad_norm": 0.8285257816314697,
+ "learning_rate": 0.00019999208860571255,
+ "loss": 1.9146,
+ "step": 12
+ },
+ {
+ "epoch": 0.01300365727860961,
+ "grad_norm": 1.2248319387435913,
+ "learning_rate": 0.00019999071512101496,
+ "loss": 1.7467,
+ "step": 13
+ },
+ {
+ "epoch": 0.014003938607733425,
+ "grad_norm": 0.8307135105133057,
+ "learning_rate": 0.00019998923176460474,
+ "loss": 1.6896,
+ "step": 14
+ },
+ {
+ "epoch": 0.01500421993685724,
+ "grad_norm": 1.1531301736831665,
+ "learning_rate": 0.00019998763853811184,
+ "loss": 1.7549,
+ "step": 15
+ },
+ {
+ "epoch": 0.016004501265981057,
+ "grad_norm": 1.0071958303451538,
+ "learning_rate": 0.00019998593544328692,
+ "loss": 1.903,
+ "step": 16
+ },
+ {
+ "epoch": 0.017004782595104875,
+ "grad_norm": 0.9111937284469604,
+ "learning_rate": 0.00019998412248200138,
+ "loss": 1.8372,
+ "step": 17
+ },
+ {
+ "epoch": 0.01800506392422869,
+ "grad_norm": 0.9943836331367493,
+ "learning_rate": 0.00019998219965624734,
+ "loss": 1.7304,
+ "step": 18
+ },
+ {
+ "epoch": 0.019005345253352506,
+ "grad_norm": 0.8139007687568665,
+ "learning_rate": 0.0001999801669681376,
+ "loss": 1.6932,
+ "step": 19
+ },
+ {
+ "epoch": 0.020005626582476323,
+ "grad_norm": 0.7991273999214172,
+ "learning_rate": 0.00019997802441990573,
+ "loss": 1.9596,
+ "step": 20
+ },
+ {
+ "epoch": 0.021005907911600136,
+ "grad_norm": 0.832266628742218,
+ "learning_rate": 0.00019997577201390606,
+ "loss": 1.7116,
+ "step": 21
+ },
+ {
+ "epoch": 0.022006189240723954,
+ "grad_norm": 0.8465655446052551,
+ "learning_rate": 0.00019997340975261353,
+ "loss": 1.7711,
+ "step": 22
+ },
+ {
+ "epoch": 0.02300647056984777,
+ "grad_norm": 1.032426118850708,
+ "learning_rate": 0.00019997093763862383,
+ "loss": 1.6746,
+ "step": 23
+ },
+ {
+ "epoch": 0.024006751898971584,
+ "grad_norm": 1.0036743879318237,
+ "learning_rate": 0.0001999683556746534,
+ "loss": 1.7274,
+ "step": 24
+ },
+ {
+ "epoch": 0.0250070332280954,
+ "grad_norm": 0.9491412043571472,
+ "learning_rate": 0.0001999656638635393,
+ "loss": 2.0302,
+ "step": 25
+ },
+ {
+ "epoch": 0.02600731455721922,
+ "grad_norm": 0.9477822184562683,
+ "learning_rate": 0.0001999628622082394,
+ "loss": 1.6107,
+ "step": 26
+ },
+ {
+ "epoch": 0.027007595886343033,
+ "grad_norm": 1.0687041282653809,
+ "learning_rate": 0.0001999599507118322,
+ "loss": 1.8225,
+ "step": 27
+ },
+ {
+ "epoch": 0.02800787721546685,
+ "grad_norm": 1.6572712659835815,
+ "learning_rate": 0.00019995692937751683,
+ "loss": 1.896,
+ "step": 28
+ },
+ {
+ "epoch": 0.029008158544590667,
+ "grad_norm": 1.013258695602417,
+ "learning_rate": 0.0001999537982086133,
+ "loss": 1.7847,
+ "step": 29
+ },
+ {
+ "epoch": 0.03000843987371448,
+ "grad_norm": 0.7584932446479797,
+ "learning_rate": 0.00019995055720856218,
+ "loss": 1.5841,
+ "step": 30
+ },
+ {
+ "epoch": 0.031008721202838298,
+ "grad_norm": 1.1543537378311157,
+ "learning_rate": 0.00019994720638092468,
+ "loss": 1.8362,
+ "step": 31
+ },
+ {
+ "epoch": 0.032009002531962115,
+ "grad_norm": 0.8389608860015869,
+ "learning_rate": 0.00019994374572938277,
+ "loss": 1.7913,
+ "step": 32
+ },
+ {
+ "epoch": 0.03300928386108593,
+ "grad_norm": 0.7582125663757324,
+ "learning_rate": 0.00019994017525773913,
+ "loss": 1.5406,
+ "step": 33
+ },
+ {
+ "epoch": 0.03400956519020975,
+ "grad_norm": 0.7866935133934021,
+ "learning_rate": 0.00019993649496991705,
+ "loss": 1.5363,
+ "step": 34
+ },
+ {
+ "epoch": 0.03500984651933356,
+ "grad_norm": 0.8007768988609314,
+ "learning_rate": 0.00019993270486996046,
+ "loss": 1.7597,
+ "step": 35
+ },
+ {
+ "epoch": 0.03601012784845738,
+ "grad_norm": 0.8109031319618225,
+ "learning_rate": 0.000199928804962034,
+ "loss": 1.5554,
+ "step": 36
+ },
+ {
+ "epoch": 0.037010409177581194,
+ "grad_norm": 0.7722628116607666,
+ "learning_rate": 0.00019992479525042303,
+ "loss": 1.6437,
+ "step": 37
+ },
+ {
+ "epoch": 0.03801069050670501,
+ "grad_norm": 0.7336480021476746,
+ "learning_rate": 0.00019992067573953342,
+ "loss": 1.7276,
+ "step": 38
+ },
+ {
+ "epoch": 0.03901097183582883,
+ "grad_norm": 0.6940280795097351,
+ "learning_rate": 0.0001999164464338918,
+ "loss": 1.846,
+ "step": 39
+ },
+ {
+ "epoch": 0.040011253164952645,
+ "grad_norm": 0.7079702615737915,
+ "learning_rate": 0.0001999121073381454,
+ "loss": 1.7017,
+ "step": 40
+ },
+ {
+ "epoch": 0.041011534494076456,
+ "grad_norm": 0.7438498139381409,
+ "learning_rate": 0.0001999076584570621,
+ "loss": 1.665,
+ "step": 41
+ },
+ {
+ "epoch": 0.04201181582320027,
+ "grad_norm": 0.6951525211334229,
+ "learning_rate": 0.00019990309979553045,
+ "loss": 1.588,
+ "step": 42
+ },
+ {
+ "epoch": 0.04301209715232409,
+ "grad_norm": 0.9398604035377502,
+ "learning_rate": 0.00019989843135855958,
+ "loss": 1.6513,
+ "step": 43
+ },
+ {
+ "epoch": 0.04401237848144791,
+ "grad_norm": 0.7384347319602966,
+ "learning_rate": 0.00019989365315127922,
+ "loss": 1.5975,
+ "step": 44
+ },
+ {
+ "epoch": 0.045012659810571724,
+ "grad_norm": 0.9856846332550049,
+ "learning_rate": 0.0001998887651789398,
+ "loss": 1.644,
+ "step": 45
+ },
+ {
+ "epoch": 0.04601294113969554,
+ "grad_norm": 0.7322820425033569,
+ "learning_rate": 0.0001998837674469123,
+ "loss": 1.5207,
+ "step": 46
+ },
+ {
+ "epoch": 0.04701322246881936,
+ "grad_norm": 0.8695257902145386,
+ "learning_rate": 0.00019987865996068833,
+ "loss": 1.5572,
+ "step": 47
+ },
+ {
+ "epoch": 0.04801350379794317,
+ "grad_norm": 0.7231017351150513,
+ "learning_rate": 0.00019987344272588006,
+ "loss": 1.5841,
+ "step": 48
+ },
+ {
+ "epoch": 0.049013785127066986,
+ "grad_norm": 0.7147384285926819,
+ "learning_rate": 0.00019986811574822033,
+ "loss": 1.8628,
+ "step": 49
+ },
+ {
+ "epoch": 0.0500140664561908,
+ "grad_norm": 0.8631477355957031,
+ "learning_rate": 0.00019986267903356254,
+ "loss": 1.8487,
+ "step": 50
+ },
+ {
+ "epoch": 0.05101434778531462,
+ "grad_norm": 0.7995486855506897,
+ "learning_rate": 0.0001998571325878806,
+ "loss": 1.6491,
+ "step": 51
+ },
+ {
+ "epoch": 0.05201462911443844,
+ "grad_norm": 0.7828657031059265,
+ "learning_rate": 0.0001998514764172691,
+ "loss": 1.7496,
+ "step": 52
+ },
+ {
+ "epoch": 0.053014910443562255,
+ "grad_norm": 0.7789833545684814,
+ "learning_rate": 0.00019984571052794313,
+ "loss": 1.6628,
+ "step": 53
+ },
+ {
+ "epoch": 0.054015191772686065,
+ "grad_norm": 0.7077661752700806,
+ "learning_rate": 0.00019983983492623833,
+ "loss": 1.771,
+ "step": 54
+ },
+ {
+ "epoch": 0.05501547310180988,
+ "grad_norm": 0.7939582467079163,
+ "learning_rate": 0.00019983384961861096,
+ "loss": 1.707,
+ "step": 55
+ },
+ {
+ "epoch": 0.0560157544309337,
+ "grad_norm": 0.9438828229904175,
+ "learning_rate": 0.0001998277546116378,
+ "loss": 1.8334,
+ "step": 56
+ },
+ {
+ "epoch": 0.05701603576005752,
+ "grad_norm": 0.8028286695480347,
+ "learning_rate": 0.00019982154991201608,
+ "loss": 1.9117,
+ "step": 57
+ },
+ {
+ "epoch": 0.058016317089181334,
+ "grad_norm": 0.6563037037849426,
+ "learning_rate": 0.00019981523552656377,
+ "loss": 1.4767,
+ "step": 58
+ },
+ {
+ "epoch": 0.05901659841830515,
+ "grad_norm": 0.6600964665412903,
+ "learning_rate": 0.00019980881146221914,
+ "loss": 1.6656,
+ "step": 59
+ },
+ {
+ "epoch": 0.06001687974742896,
+ "grad_norm": 0.7966578602790833,
+ "learning_rate": 0.00019980227772604112,
+ "loss": 1.4844,
+ "step": 60
+ },
+ {
+ "epoch": 0.06101716107655278,
+ "grad_norm": 0.8019976615905762,
+ "learning_rate": 0.0001997956343252091,
+ "loss": 1.5682,
+ "step": 61
+ },
+ {
+ "epoch": 0.062017442405676595,
+ "grad_norm": 0.8935349583625793,
+ "learning_rate": 0.00019978888126702296,
+ "loss": 1.8131,
+ "step": 62
+ },
+ {
+ "epoch": 0.06301772373480041,
+ "grad_norm": 0.8085179924964905,
+ "learning_rate": 0.00019978201855890308,
+ "loss": 1.5602,
+ "step": 63
+ },
+ {
+ "epoch": 0.06401800506392423,
+ "grad_norm": 0.7631951570510864,
+ "learning_rate": 0.00019977504620839035,
+ "loss": 1.8008,
+ "step": 64
+ },
+ {
+ "epoch": 0.06501828639304805,
+ "grad_norm": 0.7315165996551514,
+ "learning_rate": 0.00019976796422314615,
+ "loss": 1.5735,
+ "step": 65
+ },
+ {
+ "epoch": 0.06601856772217186,
+ "grad_norm": 0.745726466178894,
+ "learning_rate": 0.00019976077261095226,
+ "loss": 1.5775,
+ "step": 66
+ },
+ {
+ "epoch": 0.06701884905129568,
+ "grad_norm": 0.9082249999046326,
+ "learning_rate": 0.00019975347137971098,
+ "loss": 1.7427,
+ "step": 67
+ },
+ {
+ "epoch": 0.0680191303804195,
+ "grad_norm": 0.6575669050216675,
+ "learning_rate": 0.00019974606053744503,
+ "loss": 1.5231,
+ "step": 68
+ },
+ {
+ "epoch": 0.06901941170954332,
+ "grad_norm": 0.7749233245849609,
+ "learning_rate": 0.00019973854009229763,
+ "loss": 1.5703,
+ "step": 69
+ },
+ {
+ "epoch": 0.07001969303866712,
+ "grad_norm": 0.7240824699401855,
+ "learning_rate": 0.00019973091005253232,
+ "loss": 1.5197,
+ "step": 70
+ },
+ {
+ "epoch": 0.07101997436779094,
+ "grad_norm": 0.8683856725692749,
+ "learning_rate": 0.0001997231704265332,
+ "loss": 1.6183,
+ "step": 71
+ },
+ {
+ "epoch": 0.07202025569691475,
+ "grad_norm": 0.6885640621185303,
+ "learning_rate": 0.00019971532122280464,
+ "loss": 1.6565,
+ "step": 72
+ },
+ {
+ "epoch": 0.07302053702603857,
+ "grad_norm": 0.6648329496383667,
+ "learning_rate": 0.0001997073624499716,
+ "loss": 1.5943,
+ "step": 73
+ },
+ {
+ "epoch": 0.07402081835516239,
+ "grad_norm": 0.8867416977882385,
+ "learning_rate": 0.0001996992941167792,
+ "loss": 1.7855,
+ "step": 74
+ },
+ {
+ "epoch": 0.0750210996842862,
+ "grad_norm": 0.7790491580963135,
+ "learning_rate": 0.00019969111623209323,
+ "loss": 1.6723,
+ "step": 75
+ },
+ {
+ "epoch": 0.07602138101341002,
+ "grad_norm": 0.7999201416969299,
+ "learning_rate": 0.00019968282880489957,
+ "loss": 1.5619,
+ "step": 76
+ },
+ {
+ "epoch": 0.07702166234253384,
+ "grad_norm": 0.6316407322883606,
+ "learning_rate": 0.00019967443184430467,
+ "loss": 1.6377,
+ "step": 77
+ },
+ {
+ "epoch": 0.07802194367165766,
+ "grad_norm": 0.7680445313453674,
+ "learning_rate": 0.0001996659253595353,
+ "loss": 1.5433,
+ "step": 78
+ },
+ {
+ "epoch": 0.07902222500078147,
+ "grad_norm": 0.7158446907997131,
+ "learning_rate": 0.0001996573093599385,
+ "loss": 1.5436,
+ "step": 79
+ },
+ {
+ "epoch": 0.08002250632990529,
+ "grad_norm": 0.7354825139045715,
+ "learning_rate": 0.00019964858385498172,
+ "loss": 1.6512,
+ "step": 80
+ },
+ {
+ "epoch": 0.08102278765902911,
+ "grad_norm": 0.7031210660934448,
+ "learning_rate": 0.00019963974885425266,
+ "loss": 1.6411,
+ "step": 81
+ },
+ {
+ "epoch": 0.08202306898815291,
+ "grad_norm": 0.8451045751571655,
+ "learning_rate": 0.00019963080436745945,
+ "loss": 1.6622,
+ "step": 82
+ },
+ {
+ "epoch": 0.08302335031727673,
+ "grad_norm": 0.8329521417617798,
+ "learning_rate": 0.00019962175040443044,
+ "loss": 1.9269,
+ "step": 83
+ },
+ {
+ "epoch": 0.08402363164640055,
+ "grad_norm": 0.6967645883560181,
+ "learning_rate": 0.0001996125869751143,
+ "loss": 1.7243,
+ "step": 84
+ },
+ {
+ "epoch": 0.08502391297552436,
+ "grad_norm": 0.8699042797088623,
+ "learning_rate": 0.00019960331408957997,
+ "loss": 1.7211,
+ "step": 85
+ },
+ {
+ "epoch": 0.08602419430464818,
+ "grad_norm": 0.6780512928962708,
+ "learning_rate": 0.00019959393175801671,
+ "loss": 1.6376,
+ "step": 86
+ },
+ {
+ "epoch": 0.087024475633772,
+ "grad_norm": 0.7213720679283142,
+ "learning_rate": 0.00019958443999073397,
+ "loss": 1.6048,
+ "step": 87
+ },
+ {
+ "epoch": 0.08802475696289581,
+ "grad_norm": 0.6077585816383362,
+ "learning_rate": 0.00019957483879816151,
+ "loss": 1.5231,
+ "step": 88
+ },
+ {
+ "epoch": 0.08902503829201963,
+ "grad_norm": 0.6854611039161682,
+ "learning_rate": 0.00019956512819084928,
+ "loss": 1.4726,
+ "step": 89
+ },
+ {
+ "epoch": 0.09002531962114345,
+ "grad_norm": 0.6969390511512756,
+ "learning_rate": 0.00019955530817946748,
+ "loss": 1.6435,
+ "step": 90
+ },
+ {
+ "epoch": 0.09102560095026727,
+ "grad_norm": 0.7178792953491211,
+ "learning_rate": 0.00019954537877480655,
+ "loss": 1.6967,
+ "step": 91
+ },
+ {
+ "epoch": 0.09202588227939108,
+ "grad_norm": 0.8248458504676819,
+ "learning_rate": 0.00019953533998777706,
+ "loss": 1.5884,
+ "step": 92
+ },
+ {
+ "epoch": 0.0930261636085149,
+ "grad_norm": 0.6472075581550598,
+ "learning_rate": 0.00019952519182940993,
+ "loss": 1.5074,
+ "step": 93
+ },
+ {
+ "epoch": 0.09402644493763872,
+ "grad_norm": 0.7548672556877136,
+ "learning_rate": 0.00019951493431085603,
+ "loss": 1.6934,
+ "step": 94
+ },
+ {
+ "epoch": 0.09502672626676252,
+ "grad_norm": 0.6680666208267212,
+ "learning_rate": 0.00019950456744338658,
+ "loss": 1.4014,
+ "step": 95
+ },
+ {
+ "epoch": 0.09602700759588634,
+ "grad_norm": 0.7270862460136414,
+ "learning_rate": 0.00019949409123839288,
+ "loss": 1.6991,
+ "step": 96
+ },
+ {
+ "epoch": 0.09702728892501016,
+ "grad_norm": 0.682833731174469,
+ "learning_rate": 0.00019948350570738642,
+ "loss": 1.4926,
+ "step": 97
+ },
+ {
+ "epoch": 0.09802757025413397,
+ "grad_norm": 0.6598315238952637,
+ "learning_rate": 0.0001994728108619987,
+ "loss": 1.6329,
+ "step": 98
+ },
+ {
+ "epoch": 0.09902785158325779,
+ "grad_norm": 0.830845832824707,
+ "learning_rate": 0.0001994620067139815,
+ "loss": 1.8517,
+ "step": 99
+ },
+ {
+ "epoch": 0.1000281329123816,
+ "grad_norm": 0.6964694857597351,
+ "learning_rate": 0.00019945109327520658,
+ "loss": 1.5459,
+ "step": 100
+ },
+ {
+ "epoch": 0.10102841424150542,
+ "grad_norm": 0.8640177249908447,
+ "learning_rate": 0.00019944007055766586,
+ "loss": 1.6638,
+ "step": 101
+ },
+ {
+ "epoch": 0.10202869557062924,
+ "grad_norm": 0.6485210657119751,
+ "learning_rate": 0.00019942893857347128,
+ "loss": 1.8025,
+ "step": 102
+ },
+ {
+ "epoch": 0.10302897689975306,
+ "grad_norm": 0.6746248006820679,
+ "learning_rate": 0.00019941769733485494,
+ "loss": 1.6954,
+ "step": 103
+ },
+ {
+ "epoch": 0.10402925822887688,
+ "grad_norm": 0.7386549115180969,
+ "learning_rate": 0.00019940634685416888,
+ "loss": 1.4547,
+ "step": 104
+ },
+ {
+ "epoch": 0.10502953955800069,
+ "grad_norm": 0.7518633008003235,
+ "learning_rate": 0.00019939488714388524,
+ "loss": 1.5098,
+ "step": 105
+ },
+ {
+ "epoch": 0.10602982088712451,
+ "grad_norm": 0.7350422739982605,
+ "learning_rate": 0.00019938331821659614,
+ "loss": 1.5452,
+ "step": 106
+ },
+ {
+ "epoch": 0.10703010221624833,
+ "grad_norm": 0.6544668674468994,
+ "learning_rate": 0.0001993716400850138,
+ "loss": 1.5106,
+ "step": 107
+ },
+ {
+ "epoch": 0.10803038354537213,
+ "grad_norm": 0.6108564138412476,
+ "learning_rate": 0.0001993598527619703,
+ "loss": 1.5818,
+ "step": 108
+ },
+ {
+ "epoch": 0.10903066487449595,
+ "grad_norm": 0.731071949005127,
+ "learning_rate": 0.00019934795626041783,
+ "loss": 1.4819,
+ "step": 109
+ },
+ {
+ "epoch": 0.11003094620361976,
+ "grad_norm": 0.5978986620903015,
+ "learning_rate": 0.0001993359505934285,
+ "loss": 1.5469,
+ "step": 110
+ },
+ {
+ "epoch": 0.11103122753274358,
+ "grad_norm": 0.7249881029129028,
+ "learning_rate": 0.00019932383577419432,
+ "loss": 1.7466,
+ "step": 111
+ },
+ {
+ "epoch": 0.1120315088618674,
+ "grad_norm": 0.6161806583404541,
+ "learning_rate": 0.0001993116118160273,
+ "loss": 1.3411,
+ "step": 112
+ },
+ {
+ "epoch": 0.11303179019099122,
+ "grad_norm": 0.6745229363441467,
+ "learning_rate": 0.00019929927873235938,
+ "loss": 1.5615,
+ "step": 113
+ },
+ {
+ "epoch": 0.11403207152011503,
+ "grad_norm": 0.6489872336387634,
+ "learning_rate": 0.00019928683653674237,
+ "loss": 1.6279,
+ "step": 114
+ },
+ {
+ "epoch": 0.11503235284923885,
+ "grad_norm": 0.7769975662231445,
+ "learning_rate": 0.00019927428524284805,
+ "loss": 1.6155,
+ "step": 115
+ },
+ {
+ "epoch": 0.11603263417836267,
+ "grad_norm": 0.734336793422699,
+ "learning_rate": 0.00019926162486446792,
+ "loss": 1.6837,
+ "step": 116
+ },
+ {
+ "epoch": 0.11703291550748648,
+ "grad_norm": 0.6966903805732727,
+ "learning_rate": 0.0001992488554155135,
+ "loss": 1.6659,
+ "step": 117
+ },
+ {
+ "epoch": 0.1180331968366103,
+ "grad_norm": 0.6714586615562439,
+ "learning_rate": 0.00019923597691001615,
+ "loss": 1.5161,
+ "step": 118
+ },
+ {
+ "epoch": 0.11903347816573412,
+ "grad_norm": 0.6390894651412964,
+ "learning_rate": 0.0001992229893621269,
+ "loss": 1.4561,
+ "step": 119
+ },
+ {
+ "epoch": 0.12003375949485792,
+ "grad_norm": 0.6481143832206726,
+ "learning_rate": 0.00019920989278611687,
+ "loss": 1.6331,
+ "step": 120
+ },
+ {
+ "epoch": 0.12103404082398174,
+ "grad_norm": 0.6819384694099426,
+ "learning_rate": 0.0001991966871963767,
+ "loss": 1.6508,
+ "step": 121
+ },
+ {
+ "epoch": 0.12203432215310556,
+ "grad_norm": 0.6839059591293335,
+ "learning_rate": 0.000199183372607417,
+ "loss": 1.6514,
+ "step": 122
+ },
+ {
+ "epoch": 0.12303460348222937,
+ "grad_norm": 0.6401050090789795,
+ "learning_rate": 0.0001991699490338681,
+ "loss": 1.8065,
+ "step": 123
+ },
+ {
+ "epoch": 0.12403488481135319,
+ "grad_norm": 0.6860588788986206,
+ "learning_rate": 0.00019915641649048005,
+ "loss": 1.7658,
+ "step": 124
+ },
+ {
+ "epoch": 0.12503516614047702,
+ "grad_norm": 0.6286434531211853,
+ "learning_rate": 0.0001991427749921227,
+ "loss": 1.7678,
+ "step": 125
+ },
+ {
+ "epoch": 0.12603544746960083,
+ "grad_norm": 0.6609922647476196,
+ "learning_rate": 0.00019912902455378556,
+ "loss": 1.4934,
+ "step": 126
+ },
+ {
+ "epoch": 0.12703572879872463,
+ "grad_norm": 0.7058399319648743,
+ "learning_rate": 0.00019911516519057788,
+ "loss": 1.6058,
+ "step": 127
+ },
+ {
+ "epoch": 0.12803601012784846,
+ "grad_norm": 0.6362051963806152,
+ "learning_rate": 0.00019910119691772863,
+ "loss": 1.502,
+ "step": 128
+ },
+ {
+ "epoch": 0.12903629145697226,
+ "grad_norm": 0.7493100762367249,
+ "learning_rate": 0.00019908711975058637,
+ "loss": 1.5287,
+ "step": 129
+ },
+ {
+ "epoch": 0.1300365727860961,
+ "grad_norm": 0.6492393612861633,
+ "learning_rate": 0.0001990729337046194,
+ "loss": 1.5716,
+ "step": 130
+ },
+ {
+ "epoch": 0.1310368541152199,
+ "grad_norm": 0.70331871509552,
+ "learning_rate": 0.0001990586387954156,
+ "loss": 1.5882,
+ "step": 131
+ },
+ {
+ "epoch": 0.13203713544434373,
+ "grad_norm": 0.7581572532653809,
+ "learning_rate": 0.00019904423503868247,
+ "loss": 1.7627,
+ "step": 132
+ },
+ {
+ "epoch": 0.13303741677346753,
+ "grad_norm": 0.7087228894233704,
+ "learning_rate": 0.00019902972245024715,
+ "loss": 1.6257,
+ "step": 133
+ },
+ {
+ "epoch": 0.13403769810259136,
+ "grad_norm": 0.7920627593994141,
+ "learning_rate": 0.00019901510104605637,
+ "loss": 1.572,
+ "step": 134
+ },
+ {
+ "epoch": 0.13503797943171517,
+ "grad_norm": 0.6869202256202698,
+ "learning_rate": 0.00019900037084217637,
+ "loss": 1.5478,
+ "step": 135
+ },
+ {
+ "epoch": 0.136038260760839,
+ "grad_norm": 0.6879409551620483,
+ "learning_rate": 0.00019898553185479303,
+ "loss": 1.3104,
+ "step": 136
+ },
+ {
+ "epoch": 0.1370385420899628,
+ "grad_norm": 0.6574143767356873,
+ "learning_rate": 0.00019897058410021167,
+ "loss": 1.7041,
+ "step": 137
+ },
+ {
+ "epoch": 0.13803882341908663,
+ "grad_norm": 0.7793259620666504,
+ "learning_rate": 0.00019895552759485722,
+ "loss": 1.5417,
+ "step": 138
+ },
+ {
+ "epoch": 0.13903910474821043,
+ "grad_norm": 0.6310438513755798,
+ "learning_rate": 0.00019894036235527395,
+ "loss": 1.4978,
+ "step": 139
+ },
+ {
+ "epoch": 0.14003938607733424,
+ "grad_norm": 0.6298012137413025,
+ "learning_rate": 0.00019892508839812584,
+ "loss": 1.5367,
+ "step": 140
+ },
+ {
+ "epoch": 0.14103966740645807,
+ "grad_norm": 0.5647856593132019,
+ "learning_rate": 0.00019890970574019617,
+ "loss": 1.537,
+ "step": 141
+ },
+ {
+ "epoch": 0.14203994873558187,
+ "grad_norm": 0.6491876244544983,
+ "learning_rate": 0.00019889421439838763,
+ "loss": 1.6992,
+ "step": 142
+ },
+ {
+ "epoch": 0.1430402300647057,
+ "grad_norm": 0.6574720144271851,
+ "learning_rate": 0.00019887861438972246,
+ "loss": 1.3837,
+ "step": 143
+ },
+ {
+ "epoch": 0.1440405113938295,
+ "grad_norm": 0.6267092227935791,
+ "learning_rate": 0.00019886290573134228,
+ "loss": 1.6307,
+ "step": 144
+ },
+ {
+ "epoch": 0.14504079272295334,
+ "grad_norm": 0.6785029172897339,
+ "learning_rate": 0.000198847088440508,
+ "loss": 1.574,
+ "step": 145
+ },
+ {
+ "epoch": 0.14604107405207714,
+ "grad_norm": 0.6218644380569458,
+ "learning_rate": 0.0001988311625346,
+ "loss": 1.4676,
+ "step": 146
+ },
+ {
+ "epoch": 0.14704135538120097,
+ "grad_norm": 0.6047986745834351,
+ "learning_rate": 0.00019881512803111796,
+ "loss": 1.4316,
+ "step": 147
+ },
+ {
+ "epoch": 0.14804163671032478,
+ "grad_norm": 0.7340937256813049,
+ "learning_rate": 0.00019879898494768093,
+ "loss": 1.5185,
+ "step": 148
+ },
+ {
+ "epoch": 0.1490419180394486,
+ "grad_norm": 0.5874620676040649,
+ "learning_rate": 0.00019878273330202717,
+ "loss": 1.5031,
+ "step": 149
+ },
+ {
+ "epoch": 0.1500421993685724,
+ "grad_norm": 0.6943556666374207,
+ "learning_rate": 0.00019876637311201433,
+ "loss": 1.7323,
+ "step": 150
+ },
+ {
+ "epoch": 0.15104248069769624,
+ "grad_norm": 0.6345832347869873,
+ "learning_rate": 0.00019874990439561934,
+ "loss": 1.4691,
+ "step": 151
+ },
+ {
+ "epoch": 0.15204276202682004,
+ "grad_norm": 0.7047753930091858,
+ "learning_rate": 0.0001987333271709383,
+ "loss": 1.5198,
+ "step": 152
+ },
+ {
+ "epoch": 0.15304304335594385,
+ "grad_norm": 0.6043322086334229,
+ "learning_rate": 0.00019871664145618657,
+ "loss": 1.5488,
+ "step": 153
+ },
+ {
+ "epoch": 0.15404332468506768,
+ "grad_norm": 0.5978446006774902,
+ "learning_rate": 0.00019869984726969878,
+ "loss": 1.4278,
+ "step": 154
+ },
+ {
+ "epoch": 0.15504360601419148,
+ "grad_norm": 0.6796436905860901,
+ "learning_rate": 0.00019868294462992866,
+ "loss": 1.5845,
+ "step": 155
+ },
+ {
+ "epoch": 0.1560438873433153,
+ "grad_norm": 0.7113372087478638,
+ "learning_rate": 0.00019866593355544922,
+ "loss": 1.7509,
+ "step": 156
+ },
+ {
+ "epoch": 0.15704416867243912,
+ "grad_norm": 0.5908107757568359,
+ "learning_rate": 0.00019864881406495246,
+ "loss": 1.5693,
+ "step": 157
+ },
+ {
+ "epoch": 0.15804445000156295,
+ "grad_norm": 0.7135252952575684,
+ "learning_rate": 0.00019863158617724967,
+ "loss": 1.6109,
+ "step": 158
+ },
+ {
+ "epoch": 0.15904473133068675,
+ "grad_norm": 0.5621710419654846,
+ "learning_rate": 0.00019861424991127115,
+ "loss": 1.5368,
+ "step": 159
+ },
+ {
+ "epoch": 0.16004501265981058,
+ "grad_norm": 0.6205443143844604,
+ "learning_rate": 0.00019859680528606637,
+ "loss": 1.5181,
+ "step": 160
+ },
+ {
+ "epoch": 0.16104529398893438,
+ "grad_norm": 0.6933260560035706,
+ "learning_rate": 0.00019857925232080373,
+ "loss": 1.4508,
+ "step": 161
+ },
+ {
+ "epoch": 0.16204557531805822,
+ "grad_norm": 0.6911661028862,
+ "learning_rate": 0.00019856159103477086,
+ "loss": 1.5423,
+ "step": 162
+ },
+ {
+ "epoch": 0.16304585664718202,
+ "grad_norm": 0.7684744000434875,
+ "learning_rate": 0.00019854382144737426,
+ "loss": 1.4097,
+ "step": 163
+ },
+ {
+ "epoch": 0.16404613797630582,
+ "grad_norm": 0.6657288074493408,
+ "learning_rate": 0.00019852594357813952,
+ "loss": 1.6145,
+ "step": 164
+ },
+ {
+ "epoch": 0.16504641930542965,
+ "grad_norm": 0.7030160427093506,
+ "learning_rate": 0.00019850795744671116,
+ "loss": 1.6551,
+ "step": 165
+ },
+ {
+ "epoch": 0.16604670063455346,
+ "grad_norm": 0.87894207239151,
+ "learning_rate": 0.0001984898630728527,
+ "loss": 1.6316,
+ "step": 166
+ },
+ {
+ "epoch": 0.1670469819636773,
+ "grad_norm": 0.6282681226730347,
+ "learning_rate": 0.0001984716604764466,
+ "loss": 1.451,
+ "step": 167
+ },
+ {
+ "epoch": 0.1680472632928011,
+ "grad_norm": 0.6729792952537537,
+ "learning_rate": 0.0001984533496774942,
+ "loss": 1.4381,
+ "step": 168
+ },
+ {
+ "epoch": 0.16904754462192492,
+ "grad_norm": 0.7300116419792175,
+ "learning_rate": 0.0001984349306961158,
+ "loss": 1.4244,
+ "step": 169
+ },
+ {
+ "epoch": 0.17004782595104873,
+ "grad_norm": 0.6853480935096741,
+ "learning_rate": 0.00019841640355255043,
+ "loss": 1.6174,
+ "step": 170
+ },
+ {
+ "epoch": 0.17104810728017256,
+ "grad_norm": 0.735612690448761,
+ "learning_rate": 0.00019839776826715614,
+ "loss": 1.5085,
+ "step": 171
+ },
+ {
+ "epoch": 0.17204838860929636,
+ "grad_norm": 0.6735563278198242,
+ "learning_rate": 0.00019837902486040978,
+ "loss": 1.507,
+ "step": 172
+ },
+ {
+ "epoch": 0.1730486699384202,
+ "grad_norm": 0.6617917418479919,
+ "learning_rate": 0.0001983601733529069,
+ "loss": 1.6774,
+ "step": 173
+ },
+ {
+ "epoch": 0.174048951267544,
+ "grad_norm": 0.7137823700904846,
+ "learning_rate": 0.00019834121376536187,
+ "loss": 1.4665,
+ "step": 174
+ },
+ {
+ "epoch": 0.17504923259666783,
+ "grad_norm": 0.6372626423835754,
+ "learning_rate": 0.00019832214611860793,
+ "loss": 1.3597,
+ "step": 175
+ },
+ {
+ "epoch": 0.17604951392579163,
+ "grad_norm": 0.7131632566452026,
+ "learning_rate": 0.00019830297043359692,
+ "loss": 1.4833,
+ "step": 176
+ },
+ {
+ "epoch": 0.17704979525491543,
+ "grad_norm": 0.7538559436798096,
+ "learning_rate": 0.00019828368673139947,
+ "loss": 1.4714,
+ "step": 177
+ },
+ {
+ "epoch": 0.17805007658403926,
+ "grad_norm": 0.5684806108474731,
+ "learning_rate": 0.0001982642950332049,
+ "loss": 1.5012,
+ "step": 178
+ },
+ {
+ "epoch": 0.17905035791316307,
+ "grad_norm": 0.621658444404602,
+ "learning_rate": 0.00019824479536032112,
+ "loss": 1.9119,
+ "step": 179
+ },
+ {
+ "epoch": 0.1800506392422869,
+ "grad_norm": 0.6564679741859436,
+ "learning_rate": 0.0001982251877341748,
+ "loss": 1.5131,
+ "step": 180
+ },
+ {
+ "epoch": 0.1810509205714107,
+ "grad_norm": 0.6546526551246643,
+ "learning_rate": 0.00019820547217631117,
+ "loss": 1.4493,
+ "step": 181
+ },
+ {
+ "epoch": 0.18205120190053453,
+ "grad_norm": 0.6504479050636292,
+ "learning_rate": 0.00019818564870839405,
+ "loss": 1.6131,
+ "step": 182
+ },
+ {
+ "epoch": 0.18305148322965833,
+ "grad_norm": 0.6269803047180176,
+ "learning_rate": 0.00019816571735220583,
+ "loss": 1.5936,
+ "step": 183
+ },
+ {
+ "epoch": 0.18405176455878217,
+ "grad_norm": 0.6303942799568176,
+ "learning_rate": 0.00019814567812964748,
+ "loss": 1.6948,
+ "step": 184
+ },
+ {
+ "epoch": 0.18505204588790597,
+ "grad_norm": 0.6562885046005249,
+ "learning_rate": 0.00019812553106273847,
+ "loss": 1.5542,
+ "step": 185
+ },
+ {
+ "epoch": 0.1860523272170298,
+ "grad_norm": 0.5844212174415588,
+ "learning_rate": 0.00019810527617361681,
+ "loss": 1.539,
+ "step": 186
+ },
+ {
+ "epoch": 0.1870526085461536,
+ "grad_norm": 0.6402295231819153,
+ "learning_rate": 0.00019808491348453894,
+ "loss": 1.4748,
+ "step": 187
+ },
+ {
+ "epoch": 0.18805288987527743,
+ "grad_norm": 0.6579477190971375,
+ "learning_rate": 0.00019806444301787978,
+ "loss": 1.5114,
+ "step": 188
+ },
+ {
+ "epoch": 0.18905317120440124,
+ "grad_norm": 0.6511597037315369,
+ "learning_rate": 0.0001980438647961327,
+ "loss": 1.4678,
+ "step": 189
+ },
+ {
+ "epoch": 0.19005345253352504,
+ "grad_norm": 0.6911427974700928,
+ "learning_rate": 0.00019802317884190935,
+ "loss": 1.6876,
+ "step": 190
+ },
+ {
+ "epoch": 0.19105373386264887,
+ "grad_norm": 0.6146433353424072,
+ "learning_rate": 0.00019800238517793996,
+ "loss": 1.5986,
+ "step": 191
+ },
+ {
+ "epoch": 0.19205401519177268,
+ "grad_norm": 0.6126302480697632,
+ "learning_rate": 0.00019798148382707296,
+ "loss": 1.571,
+ "step": 192
+ },
+ {
+ "epoch": 0.1930542965208965,
+ "grad_norm": 0.5751072764396667,
+ "learning_rate": 0.00019796047481227515,
+ "loss": 1.4921,
+ "step": 193
+ },
+ {
+ "epoch": 0.1940545778500203,
+ "grad_norm": 0.6484839916229248,
+ "learning_rate": 0.00019793935815663163,
+ "loss": 1.7495,
+ "step": 194
+ },
+ {
+ "epoch": 0.19505485917914414,
+ "grad_norm": 0.6875973343849182,
+ "learning_rate": 0.00019791813388334581,
+ "loss": 1.5782,
+ "step": 195
+ },
+ {
+ "epoch": 0.19605514050826794,
+ "grad_norm": 0.8130943179130554,
+ "learning_rate": 0.00019789680201573933,
+ "loss": 1.4964,
+ "step": 196
+ },
+ {
+ "epoch": 0.19705542183739178,
+ "grad_norm": 0.6734403371810913,
+ "learning_rate": 0.00019787536257725202,
+ "loss": 1.4787,
+ "step": 197
+ },
+ {
+ "epoch": 0.19805570316651558,
+ "grad_norm": 0.6480582356452942,
+ "learning_rate": 0.00019785381559144196,
+ "loss": 1.5629,
+ "step": 198
+ },
+ {
+ "epoch": 0.1990559844956394,
+ "grad_norm": 0.6554624438285828,
+ "learning_rate": 0.00019783216108198542,
+ "loss": 1.5806,
+ "step": 199
+ },
+ {
+ "epoch": 0.2000562658247632,
+ "grad_norm": 0.705443263053894,
+ "learning_rate": 0.00019781039907267677,
+ "loss": 1.8372,
+ "step": 200
+ },
+ {
+ "epoch": 0.20105654715388704,
+ "grad_norm": 0.706923246383667,
+ "learning_rate": 0.00019778852958742853,
+ "loss": 1.6405,
+ "step": 201
+ },
+ {
+ "epoch": 0.20205682848301085,
+ "grad_norm": 0.7062544822692871,
+ "learning_rate": 0.00019776655265027127,
+ "loss": 1.6,
+ "step": 202
+ },
+ {
+ "epoch": 0.20305710981213465,
+ "grad_norm": 0.7227569222450256,
+ "learning_rate": 0.00019774446828535371,
+ "loss": 1.5172,
+ "step": 203
+ },
+ {
+ "epoch": 0.20405739114125848,
+ "grad_norm": 0.6762563586235046,
+ "learning_rate": 0.00019772227651694256,
+ "loss": 1.6753,
+ "step": 204
+ },
+ {
+ "epoch": 0.20505767247038229,
+ "grad_norm": 0.6048421859741211,
+ "learning_rate": 0.00019769997736942258,
+ "loss": 1.4827,
+ "step": 205
+ },
+ {
+ "epoch": 0.20605795379950612,
+ "grad_norm": 0.6002956032752991,
+ "learning_rate": 0.00019767757086729647,
+ "loss": 1.5438,
+ "step": 206
+ },
+ {
+ "epoch": 0.20705823512862992,
+ "grad_norm": 0.7948954701423645,
+ "learning_rate": 0.00019765505703518496,
+ "loss": 1.4988,
+ "step": 207
+ },
+ {
+ "epoch": 0.20805851645775375,
+ "grad_norm": 0.6495680809020996,
+ "learning_rate": 0.00019763243589782662,
+ "loss": 1.5738,
+ "step": 208
+ },
+ {
+ "epoch": 0.20905879778687755,
+ "grad_norm": 0.6413107514381409,
+ "learning_rate": 0.00019760970748007803,
+ "loss": 1.3794,
+ "step": 209
+ },
+ {
+ "epoch": 0.21005907911600138,
+ "grad_norm": 0.5999665260314941,
+ "learning_rate": 0.0001975868718069136,
+ "loss": 1.4313,
+ "step": 210
+ },
+ {
+ "epoch": 0.2110593604451252,
+ "grad_norm": 0.6355773210525513,
+ "learning_rate": 0.00019756392890342563,
+ "loss": 1.5107,
+ "step": 211
+ },
+ {
+ "epoch": 0.21205964177424902,
+ "grad_norm": 0.6068251729011536,
+ "learning_rate": 0.00019754087879482422,
+ "loss": 1.536,
+ "step": 212
+ },
+ {
+ "epoch": 0.21305992310337282,
+ "grad_norm": 0.5568909049034119,
+ "learning_rate": 0.00019751772150643722,
+ "loss": 1.5372,
+ "step": 213
+ },
+ {
+ "epoch": 0.21406020443249665,
+ "grad_norm": 0.5771281719207764,
+ "learning_rate": 0.00019749445706371038,
+ "loss": 1.487,
+ "step": 214
+ },
+ {
+ "epoch": 0.21506048576162046,
+ "grad_norm": 0.6146671772003174,
+ "learning_rate": 0.00019747108549220702,
+ "loss": 1.4585,
+ "step": 215
+ },
+ {
+ "epoch": 0.21606076709074426,
+ "grad_norm": 0.5595754981040955,
+ "learning_rate": 0.00019744760681760832,
+ "loss": 1.4224,
+ "step": 216
+ },
+ {
+ "epoch": 0.2170610484198681,
+ "grad_norm": 0.5873929858207703,
+ "learning_rate": 0.00019742402106571314,
+ "loss": 1.4581,
+ "step": 217
+ },
+ {
+ "epoch": 0.2180613297489919,
+ "grad_norm": 0.5725668668746948,
+ "learning_rate": 0.00019740032826243788,
+ "loss": 1.4393,
+ "step": 218
+ },
+ {
+ "epoch": 0.21906161107811573,
+ "grad_norm": 0.6452648043632507,
+ "learning_rate": 0.0001973765284338167,
+ "loss": 1.6048,
+ "step": 219
+ },
+ {
+ "epoch": 0.22006189240723953,
+ "grad_norm": 0.6166092753410339,
+ "learning_rate": 0.00019735262160600127,
+ "loss": 1.4976,
+ "step": 220
+ },
+ {
+ "epoch": 0.22106217373636336,
+ "grad_norm": 0.7053269147872925,
+ "learning_rate": 0.00019732860780526088,
+ "loss": 1.6882,
+ "step": 221
+ },
+ {
+ "epoch": 0.22206245506548716,
+ "grad_norm": 0.7072796821594238,
+ "learning_rate": 0.00019730448705798239,
+ "loss": 1.5441,
+ "step": 222
+ },
+ {
+ "epoch": 0.223062736394611,
+ "grad_norm": 0.6704496145248413,
+ "learning_rate": 0.00019728025939067008,
+ "loss": 1.3791,
+ "step": 223
+ },
+ {
+ "epoch": 0.2240630177237348,
+ "grad_norm": 0.6141743659973145,
+ "learning_rate": 0.00019725592482994583,
+ "loss": 1.5831,
+ "step": 224
+ },
+ {
+ "epoch": 0.22506329905285863,
+ "grad_norm": 0.6235673427581787,
+ "learning_rate": 0.00019723148340254892,
+ "loss": 1.6103,
+ "step": 225
+ },
+ {
+ "epoch": 0.22606358038198243,
+ "grad_norm": 0.6383673548698425,
+ "learning_rate": 0.00019720693513533598,
+ "loss": 1.6284,
+ "step": 226
+ },
+ {
+ "epoch": 0.22706386171110624,
+ "grad_norm": 0.7666104435920715,
+ "learning_rate": 0.00019718228005528122,
+ "loss": 1.702,
+ "step": 227
+ },
+ {
+ "epoch": 0.22806414304023007,
+ "grad_norm": 0.6431383490562439,
+ "learning_rate": 0.00019715751818947603,
+ "loss": 1.4571,
+ "step": 228
+ },
+ {
+ "epoch": 0.22906442436935387,
+ "grad_norm": 0.6177626252174377,
+ "learning_rate": 0.0001971326495651293,
+ "loss": 1.4326,
+ "step": 229
+ },
+ {
+ "epoch": 0.2300647056984777,
+ "grad_norm": 0.7352898120880127,
+ "learning_rate": 0.00019710767420956705,
+ "loss": 1.7427,
+ "step": 230
+ },
+ {
+ "epoch": 0.2310649870276015,
+ "grad_norm": 0.6259469389915466,
+ "learning_rate": 0.0001970825921502328,
+ "loss": 1.634,
+ "step": 231
+ },
+ {
+ "epoch": 0.23206526835672533,
+ "grad_norm": 0.6699635982513428,
+ "learning_rate": 0.0001970574034146871,
+ "loss": 1.4705,
+ "step": 232
+ },
+ {
+ "epoch": 0.23306554968584914,
+ "grad_norm": 0.5577033162117004,
+ "learning_rate": 0.00019703210803060782,
+ "loss": 1.5438,
+ "step": 233
+ },
+ {
+ "epoch": 0.23406583101497297,
+ "grad_norm": 0.6063429117202759,
+ "learning_rate": 0.00019700670602579008,
+ "loss": 1.555,
+ "step": 234
+ },
+ {
+ "epoch": 0.23506611234409677,
+ "grad_norm": 0.6069104671478271,
+ "learning_rate": 0.00019698119742814606,
+ "loss": 1.5036,
+ "step": 235
+ },
+ {
+ "epoch": 0.2360663936732206,
+ "grad_norm": 0.6158379316329956,
+ "learning_rate": 0.00019695558226570507,
+ "loss": 1.3741,
+ "step": 236
+ },
+ {
+ "epoch": 0.2370666750023444,
+ "grad_norm": 0.6366294622421265,
+ "learning_rate": 0.00019692986056661356,
+ "loss": 1.4467,
+ "step": 237
+ },
+ {
+ "epoch": 0.23806695633146824,
+ "grad_norm": 0.6726595163345337,
+ "learning_rate": 0.00019690403235913504,
+ "loss": 1.3861,
+ "step": 238
+ },
+ {
+ "epoch": 0.23906723766059204,
+ "grad_norm": 0.6546512842178345,
+ "learning_rate": 0.00019687809767165,
+ "loss": 1.6886,
+ "step": 239
+ },
+ {
+ "epoch": 0.24006751898971584,
+ "grad_norm": 0.6623121500015259,
+ "learning_rate": 0.000196852056532656,
+ "loss": 1.5925,
+ "step": 240
+ },
+ {
+ "epoch": 0.24106780031883968,
+ "grad_norm": 0.6577529311180115,
+ "learning_rate": 0.00019682590897076752,
+ "loss": 1.4509,
+ "step": 241
+ },
+ {
+ "epoch": 0.24206808164796348,
+ "grad_norm": 0.5586327314376831,
+ "learning_rate": 0.00019679965501471608,
+ "loss": 1.6346,
+ "step": 242
+ },
+ {
+ "epoch": 0.2430683629770873,
+ "grad_norm": 0.6459937691688538,
+ "learning_rate": 0.0001967732946933499,
+ "loss": 1.4129,
+ "step": 243
+ },
+ {
+ "epoch": 0.2440686443062111,
+ "grad_norm": 0.778732180595398,
+ "learning_rate": 0.00019674682803563428,
+ "loss": 1.5129,
+ "step": 244
+ },
+ {
+ "epoch": 0.24506892563533494,
+ "grad_norm": 0.7264451384544373,
+ "learning_rate": 0.00019672025507065131,
+ "loss": 1.4483,
+ "step": 245
+ },
+ {
+ "epoch": 0.24606920696445875,
+ "grad_norm": 0.616084635257721,
+ "learning_rate": 0.00019669357582759983,
+ "loss": 1.5947,
+ "step": 246
+ },
+ {
+ "epoch": 0.24706948829358258,
+ "grad_norm": 0.5911642909049988,
+ "learning_rate": 0.00019666679033579552,
+ "loss": 1.6407,
+ "step": 247
+ },
+ {
+ "epoch": 0.24806976962270638,
+ "grad_norm": 0.6102796792984009,
+ "learning_rate": 0.00019663989862467082,
+ "loss": 1.5251,
+ "step": 248
+ },
+ {
+ "epoch": 0.2490700509518302,
+ "grad_norm": 0.5973434448242188,
+ "learning_rate": 0.00019661290072377482,
+ "loss": 1.3969,
+ "step": 249
+ },
+ {
+ "epoch": 0.25007033228095404,
+ "grad_norm": 0.8515523076057434,
+ "learning_rate": 0.00019658579666277334,
+ "loss": 1.5687,
+ "step": 250
+ },
+ {
+ "epoch": 0.2510706136100778,
+ "grad_norm": 0.5003417134284973,
+ "learning_rate": 0.0001965585864714488,
+ "loss": 1.4102,
+ "step": 251
+ },
+ {
+ "epoch": 0.25207089493920165,
+ "grad_norm": 0.5215190052986145,
+ "learning_rate": 0.00019653127017970034,
+ "loss": 1.2471,
+ "step": 252
+ },
+ {
+ "epoch": 0.2530711762683255,
+ "grad_norm": 0.6491619348526001,
+ "learning_rate": 0.0001965038478175436,
+ "loss": 1.6969,
+ "step": 253
+ },
+ {
+ "epoch": 0.25407145759744926,
+ "grad_norm": 0.6176133155822754,
+ "learning_rate": 0.00019647631941511082,
+ "loss": 1.5351,
+ "step": 254
+ },
+ {
+ "epoch": 0.2550717389265731,
+ "grad_norm": 0.6913408041000366,
+ "learning_rate": 0.0001964486850026507,
+ "loss": 1.4309,
+ "step": 255
+ },
+ {
+ "epoch": 0.2560720202556969,
+ "grad_norm": 0.5875718593597412,
+ "learning_rate": 0.00019642094461052852,
+ "loss": 1.4679,
+ "step": 256
+ },
+ {
+ "epoch": 0.25707230158482075,
+ "grad_norm": 0.6682264804840088,
+ "learning_rate": 0.00019639309826922585,
+ "loss": 1.5393,
+ "step": 257
+ },
+ {
+ "epoch": 0.2580725829139445,
+ "grad_norm": 0.7241432666778564,
+ "learning_rate": 0.0001963651460093409,
+ "loss": 1.4998,
+ "step": 258
+ },
+ {
+ "epoch": 0.25907286424306836,
+ "grad_norm": 0.5210353136062622,
+ "learning_rate": 0.00019633708786158806,
+ "loss": 1.3837,
+ "step": 259
+ },
+ {
+ "epoch": 0.2600731455721922,
+ "grad_norm": 0.584020733833313,
+ "learning_rate": 0.00019630892385679818,
+ "loss": 1.4961,
+ "step": 260
+ },
+ {
+ "epoch": 0.261073426901316,
+ "grad_norm": 0.6708115935325623,
+ "learning_rate": 0.00019628065402591845,
+ "loss": 1.5277,
+ "step": 261
+ },
+ {
+ "epoch": 0.2620737082304398,
+ "grad_norm": 0.5480003952980042,
+ "learning_rate": 0.00019625227840001225,
+ "loss": 1.556,
+ "step": 262
+ },
+ {
+ "epoch": 0.2630739895595636,
+ "grad_norm": 0.595191478729248,
+ "learning_rate": 0.0001962237970102593,
+ "loss": 1.3514,
+ "step": 263
+ },
+ {
+ "epoch": 0.26407427088868746,
+ "grad_norm": 0.7332099080085754,
+ "learning_rate": 0.0001961952098879555,
+ "loss": 1.5394,
+ "step": 264
+ },
+ {
+ "epoch": 0.26507455221781123,
+ "grad_norm": 0.596319317817688,
+ "learning_rate": 0.00019616651706451287,
+ "loss": 1.3828,
+ "step": 265
+ },
+ {
+ "epoch": 0.26607483354693506,
+ "grad_norm": 0.5998026132583618,
+ "learning_rate": 0.0001961377185714597,
+ "loss": 1.4479,
+ "step": 266
+ },
+ {
+ "epoch": 0.2670751148760589,
+ "grad_norm": 0.6220220923423767,
+ "learning_rate": 0.0001961088144404403,
+ "loss": 1.5121,
+ "step": 267
+ },
+ {
+ "epoch": 0.2680753962051827,
+ "grad_norm": 0.5865943431854248,
+ "learning_rate": 0.00019607980470321505,
+ "loss": 1.6747,
+ "step": 268
+ },
+ {
+ "epoch": 0.2690756775343065,
+ "grad_norm": 0.5790852904319763,
+ "learning_rate": 0.00019605068939166045,
+ "loss": 1.3798,
+ "step": 269
+ },
+ {
+ "epoch": 0.27007595886343033,
+ "grad_norm": 0.6157498955726624,
+ "learning_rate": 0.00019602146853776894,
+ "loss": 1.6799,
+ "step": 270
+ },
+ {
+ "epoch": 0.27107624019255416,
+ "grad_norm": 0.6214422583580017,
+ "learning_rate": 0.000195992142173649,
+ "loss": 1.4782,
+ "step": 271
+ },
+ {
+ "epoch": 0.272076521521678,
+ "grad_norm": 0.6460129618644714,
+ "learning_rate": 0.0001959627103315249,
+ "loss": 1.4874,
+ "step": 272
+ },
+ {
+ "epoch": 0.27307680285080177,
+ "grad_norm": 0.5928930640220642,
+ "learning_rate": 0.00019593317304373705,
+ "loss": 1.4557,
+ "step": 273
+ },
+ {
+ "epoch": 0.2740770841799256,
+ "grad_norm": 0.5123687982559204,
+ "learning_rate": 0.00019590353034274144,
+ "loss": 1.445,
+ "step": 274
+ },
+ {
+ "epoch": 0.27507736550904943,
+ "grad_norm": 0.607455313205719,
+ "learning_rate": 0.00019587378226111014,
+ "loss": 1.4468,
+ "step": 275
+ },
+ {
+ "epoch": 0.27607764683817326,
+ "grad_norm": 0.6108120083808899,
+ "learning_rate": 0.00019584392883153088,
+ "loss": 1.3834,
+ "step": 276
+ },
+ {
+ "epoch": 0.27707792816729704,
+ "grad_norm": 0.680404543876648,
+ "learning_rate": 0.00019581397008680717,
+ "loss": 1.5094,
+ "step": 277
+ },
+ {
+ "epoch": 0.27807820949642087,
+ "grad_norm": 0.6419563889503479,
+ "learning_rate": 0.00019578390605985826,
+ "loss": 1.6933,
+ "step": 278
+ },
+ {
+ "epoch": 0.2790784908255447,
+ "grad_norm": 0.5788853764533997,
+ "learning_rate": 0.00019575373678371909,
+ "loss": 1.4754,
+ "step": 279
+ },
+ {
+ "epoch": 0.2800787721546685,
+ "grad_norm": 0.5943770408630371,
+ "learning_rate": 0.00019572346229154025,
+ "loss": 1.2949,
+ "step": 280
+ },
+ {
+ "epoch": 0.2810790534837923,
+ "grad_norm": 0.5997135043144226,
+ "learning_rate": 0.00019569308261658787,
+ "loss": 1.5365,
+ "step": 281
+ },
+ {
+ "epoch": 0.28207933481291614,
+ "grad_norm": 0.692401647567749,
+ "learning_rate": 0.00019566259779224378,
+ "loss": 1.4946,
+ "step": 282
+ },
+ {
+ "epoch": 0.28307961614203997,
+ "grad_norm": 0.5856708884239197,
+ "learning_rate": 0.00019563200785200526,
+ "loss": 1.426,
+ "step": 283
+ },
+ {
+ "epoch": 0.28407989747116374,
+ "grad_norm": 1.2516822814941406,
+ "learning_rate": 0.00019560131282948516,
+ "loss": 1.5119,
+ "step": 284
+ },
+ {
+ "epoch": 0.2850801788002876,
+ "grad_norm": 0.6360501050949097,
+ "learning_rate": 0.0001955705127584117,
+ "loss": 1.3916,
+ "step": 285
+ },
+ {
+ "epoch": 0.2860804601294114,
+ "grad_norm": 0.6822036504745483,
+ "learning_rate": 0.00019553960767262863,
+ "loss": 1.5565,
+ "step": 286
+ },
+ {
+ "epoch": 0.28708074145853524,
+ "grad_norm": 0.6973714828491211,
+ "learning_rate": 0.00019550859760609503,
+ "loss": 1.5559,
+ "step": 287
+ },
+ {
+ "epoch": 0.288081022787659,
+ "grad_norm": 0.6595618724822998,
+ "learning_rate": 0.00019547748259288536,
+ "loss": 1.5824,
+ "step": 288
+ },
+ {
+ "epoch": 0.28908130411678284,
+ "grad_norm": 0.5625808238983154,
+ "learning_rate": 0.0001954462626671894,
+ "loss": 1.2669,
+ "step": 289
+ },
+ {
+ "epoch": 0.2900815854459067,
+ "grad_norm": 0.6318663358688354,
+ "learning_rate": 0.0001954149378633122,
+ "loss": 1.3896,
+ "step": 290
+ },
+ {
+ "epoch": 0.29108186677503045,
+ "grad_norm": 0.6655906438827515,
+ "learning_rate": 0.00019538350821567404,
+ "loss": 1.3889,
+ "step": 291
+ },
+ {
+ "epoch": 0.2920821481041543,
+ "grad_norm": 0.5947337746620178,
+ "learning_rate": 0.00019535197375881045,
+ "loss": 1.6112,
+ "step": 292
+ },
+ {
+ "epoch": 0.2930824294332781,
+ "grad_norm": 0.6139295101165771,
+ "learning_rate": 0.00019532033452737205,
+ "loss": 1.5185,
+ "step": 293
+ },
+ {
+ "epoch": 0.29408271076240194,
+ "grad_norm": 0.579953670501709,
+ "learning_rate": 0.00019528859055612468,
+ "loss": 1.3874,
+ "step": 294
+ },
+ {
+ "epoch": 0.2950829920915257,
+ "grad_norm": 0.6101506352424622,
+ "learning_rate": 0.0001952567418799492,
+ "loss": 1.5965,
+ "step": 295
+ },
+ {
+ "epoch": 0.29608327342064955,
+ "grad_norm": 0.6393965482711792,
+ "learning_rate": 0.00019522478853384155,
+ "loss": 1.4124,
+ "step": 296
+ },
+ {
+ "epoch": 0.2970835547497734,
+ "grad_norm": 0.6147856712341309,
+ "learning_rate": 0.00019519273055291266,
+ "loss": 1.3776,
+ "step": 297
+ },
+ {
+ "epoch": 0.2980838360788972,
+ "grad_norm": 0.6056416630744934,
+ "learning_rate": 0.00019516056797238846,
+ "loss": 1.4453,
+ "step": 298
+ },
+ {
+ "epoch": 0.299084117408021,
+ "grad_norm": 0.6705831289291382,
+ "learning_rate": 0.00019512830082760987,
+ "loss": 1.3248,
+ "step": 299
+ },
+ {
+ "epoch": 0.3000843987371448,
+ "grad_norm": 0.6664314866065979,
+ "learning_rate": 0.00019509592915403255,
+ "loss": 1.5865,
+ "step": 300
+ },
+ {
+ "epoch": 0.30108468006626865,
+ "grad_norm": 0.5325604677200317,
+ "learning_rate": 0.00019506345298722717,
+ "loss": 1.0646,
+ "step": 301
+ },
+ {
+ "epoch": 0.3020849613953925,
+ "grad_norm": 0.589242160320282,
+ "learning_rate": 0.00019503087236287913,
+ "loss": 1.2297,
+ "step": 302
+ },
+ {
+ "epoch": 0.30308524272451626,
+ "grad_norm": 0.5677699446678162,
+ "learning_rate": 0.00019499818731678873,
+ "loss": 1.3961,
+ "step": 303
+ },
+ {
+ "epoch": 0.3040855240536401,
+ "grad_norm": 0.5676394701004028,
+ "learning_rate": 0.00019496539788487082,
+ "loss": 1.3276,
+ "step": 304
+ },
+ {
+ "epoch": 0.3050858053827639,
+ "grad_norm": 0.7280861139297485,
+ "learning_rate": 0.0001949325041031551,
+ "loss": 1.6731,
+ "step": 305
+ },
+ {
+ "epoch": 0.3060860867118877,
+ "grad_norm": 0.690636396408081,
+ "learning_rate": 0.0001948995060077859,
+ "loss": 1.5443,
+ "step": 306
+ },
+ {
+ "epoch": 0.3070863680410115,
+ "grad_norm": 0.611426055431366,
+ "learning_rate": 0.0001948664036350221,
+ "loss": 1.5827,
+ "step": 307
+ },
+ {
+ "epoch": 0.30808664937013536,
+ "grad_norm": 0.7112497091293335,
+ "learning_rate": 0.00019483319702123732,
+ "loss": 1.5401,
+ "step": 308
+ },
+ {
+ "epoch": 0.3090869306992592,
+ "grad_norm": 0.6598275303840637,
+ "learning_rate": 0.00019479988620291956,
+ "loss": 1.6432,
+ "step": 309
+ },
+ {
+ "epoch": 0.31008721202838296,
+ "grad_norm": 0.5019932985305786,
+ "learning_rate": 0.00019476647121667137,
+ "loss": 1.2561,
+ "step": 310
+ },
+ {
+ "epoch": 0.3110874933575068,
+ "grad_norm": 0.7777897715568542,
+ "learning_rate": 0.00019473295209920983,
+ "loss": 1.6118,
+ "step": 311
+ },
+ {
+ "epoch": 0.3120877746866306,
+ "grad_norm": 0.6028640866279602,
+ "learning_rate": 0.00019469932888736632,
+ "loss": 1.4682,
+ "step": 312
+ },
+ {
+ "epoch": 0.31308805601575446,
+ "grad_norm": 0.554381251335144,
+ "learning_rate": 0.00019466560161808674,
+ "loss": 1.4179,
+ "step": 313
+ },
+ {
+ "epoch": 0.31408833734487823,
+ "grad_norm": 0.6212736368179321,
+ "learning_rate": 0.00019463177032843124,
+ "loss": 1.4327,
+ "step": 314
+ },
+ {
+ "epoch": 0.31508861867400206,
+ "grad_norm": 0.6829814910888672,
+ "learning_rate": 0.00019459783505557424,
+ "loss": 1.4455,
+ "step": 315
+ },
+ {
+ "epoch": 0.3160889000031259,
+ "grad_norm": 0.5808065533638,
+ "learning_rate": 0.00019456379583680452,
+ "loss": 1.3583,
+ "step": 316
+ },
+ {
+ "epoch": 0.31708918133224967,
+ "grad_norm": 0.6354159712791443,
+ "learning_rate": 0.000194529652709525,
+ "loss": 1.6916,
+ "step": 317
+ },
+ {
+ "epoch": 0.3180894626613735,
+ "grad_norm": 0.6299159526824951,
+ "learning_rate": 0.00019449540571125286,
+ "loss": 1.47,
+ "step": 318
+ },
+ {
+ "epoch": 0.31908974399049733,
+ "grad_norm": 0.6222877502441406,
+ "learning_rate": 0.00019446105487961926,
+ "loss": 1.4137,
+ "step": 319
+ },
+ {
+ "epoch": 0.32009002531962116,
+ "grad_norm": 0.5995916724205017,
+ "learning_rate": 0.0001944266002523696,
+ "loss": 1.3679,
+ "step": 320
+ },
+ {
+ "epoch": 0.32109030664874494,
+ "grad_norm": 0.599814236164093,
+ "learning_rate": 0.0001943920418673633,
+ "loss": 1.4075,
+ "step": 321
+ },
+ {
+ "epoch": 0.32209058797786877,
+ "grad_norm": 0.5409269332885742,
+ "learning_rate": 0.00019435737976257377,
+ "loss": 1.4289,
+ "step": 322
+ },
+ {
+ "epoch": 0.3230908693069926,
+ "grad_norm": 0.5298951864242554,
+ "learning_rate": 0.00019432261397608834,
+ "loss": 1.2834,
+ "step": 323
+ },
+ {
+ "epoch": 0.32409115063611643,
+ "grad_norm": 0.7196112871170044,
+ "learning_rate": 0.00019428774454610843,
+ "loss": 1.4845,
+ "step": 324
+ },
+ {
+ "epoch": 0.3250914319652402,
+ "grad_norm": 0.5605450868606567,
+ "learning_rate": 0.00019425277151094913,
+ "loss": 1.4575,
+ "step": 325
+ },
+ {
+ "epoch": 0.32609171329436404,
+ "grad_norm": 0.573080837726593,
+ "learning_rate": 0.00019421769490903957,
+ "loss": 1.5757,
+ "step": 326
+ },
+ {
+ "epoch": 0.32709199462348787,
+ "grad_norm": 0.5017902851104736,
+ "learning_rate": 0.0001941825147789225,
+ "loss": 1.5794,
+ "step": 327
+ },
+ {
+ "epoch": 0.32809227595261165,
+ "grad_norm": 0.643267810344696,
+ "learning_rate": 0.00019414723115925456,
+ "loss": 1.4903,
+ "step": 328
+ },
+ {
+ "epoch": 0.3290925572817355,
+ "grad_norm": 0.6522070169448853,
+ "learning_rate": 0.0001941118440888061,
+ "loss": 1.5907,
+ "step": 329
+ },
+ {
+ "epoch": 0.3300928386108593,
+ "grad_norm": 0.6496105790138245,
+ "learning_rate": 0.0001940763536064611,
+ "loss": 1.4225,
+ "step": 330
+ },
+ {
+ "epoch": 0.33109311993998314,
+ "grad_norm": 0.6011468768119812,
+ "learning_rate": 0.00019404075975121716,
+ "loss": 1.5022,
+ "step": 331
+ },
+ {
+ "epoch": 0.3320934012691069,
+ "grad_norm": 0.6327878832817078,
+ "learning_rate": 0.0001940050625621855,
+ "loss": 1.468,
+ "step": 332
+ },
+ {
+ "epoch": 0.33309368259823074,
+ "grad_norm": 0.6187490820884705,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 1.5183,
+ "step": 333
+ },
+ {
+ "epoch": 0.3340939639273546,
+ "grad_norm": 0.7625093460083008,
+ "learning_rate": 0.0001939333583397715,
+ "loss": 1.4813,
+ "step": 334
+ },
+ {
+ "epoch": 0.3350942452564784,
+ "grad_norm": 0.5286359190940857,
+ "learning_rate": 0.00019389735138517915,
+ "loss": 1.3674,
+ "step": 335
+ },
+ {
+ "epoch": 0.3360945265856022,
+ "grad_norm": 0.5798503160476685,
+ "learning_rate": 0.00019386124125437895,
+ "loss": 1.3016,
+ "step": 336
+ },
+ {
+ "epoch": 0.337094807914726,
+ "grad_norm": 0.48794126510620117,
+ "learning_rate": 0.00019382502798704935,
+ "loss": 1.3642,
+ "step": 337
+ },
+ {
+ "epoch": 0.33809508924384984,
+ "grad_norm": 0.7394312620162964,
+ "learning_rate": 0.00019378871162298227,
+ "loss": 1.327,
+ "step": 338
+ },
+ {
+ "epoch": 0.3390953705729737,
+ "grad_norm": 0.5598319172859192,
+ "learning_rate": 0.00019375229220208276,
+ "loss": 1.4247,
+ "step": 339
+ },
+ {
+ "epoch": 0.34009565190209745,
+ "grad_norm": 0.6099628806114197,
+ "learning_rate": 0.00019371576976436917,
+ "loss": 1.4906,
+ "step": 340
+ },
+ {
+ "epoch": 0.3410959332312213,
+ "grad_norm": 0.6749781370162964,
+ "learning_rate": 0.00019367914434997312,
+ "loss": 1.367,
+ "step": 341
+ },
+ {
+ "epoch": 0.3420962145603451,
+ "grad_norm": 0.7721238136291504,
+ "learning_rate": 0.00019364241599913924,
+ "loss": 1.4464,
+ "step": 342
+ },
+ {
+ "epoch": 0.3430964958894689,
+ "grad_norm": 0.5762369632720947,
+ "learning_rate": 0.0001936055847522254,
+ "loss": 1.409,
+ "step": 343
+ },
+ {
+ "epoch": 0.3440967772185927,
+ "grad_norm": 0.6960498690605164,
+ "learning_rate": 0.00019356865064970244,
+ "loss": 1.3907,
+ "step": 344
+ },
+ {
+ "epoch": 0.34509705854771655,
+ "grad_norm": 0.5805984735488892,
+ "learning_rate": 0.0001935316137321543,
+ "loss": 1.4539,
+ "step": 345
+ },
+ {
+ "epoch": 0.3460973398768404,
+ "grad_norm": 0.5686045289039612,
+ "learning_rate": 0.00019349447404027782,
+ "loss": 1.4493,
+ "step": 346
+ },
+ {
+ "epoch": 0.34709762120596416,
+ "grad_norm": 0.5448501706123352,
+ "learning_rate": 0.00019345723161488283,
+ "loss": 1.5633,
+ "step": 347
+ },
+ {
+ "epoch": 0.348097902535088,
+ "grad_norm": 0.6388784050941467,
+ "learning_rate": 0.000193419886496892,
+ "loss": 1.7179,
+ "step": 348
+ },
+ {
+ "epoch": 0.3490981838642118,
+ "grad_norm": 0.5240457653999329,
+ "learning_rate": 0.00019338243872734086,
+ "loss": 1.4411,
+ "step": 349
+ },
+ {
+ "epoch": 0.35009846519333565,
+ "grad_norm": 0.5460641384124756,
+ "learning_rate": 0.00019334488834737775,
+ "loss": 1.361,
+ "step": 350
+ },
+ {
+ "epoch": 0.3510987465224594,
+ "grad_norm": 0.5495695471763611,
+ "learning_rate": 0.00019330723539826375,
+ "loss": 1.5891,
+ "step": 351
+ },
+ {
+ "epoch": 0.35209902785158326,
+ "grad_norm": 0.5618153214454651,
+ "learning_rate": 0.00019326947992137262,
+ "loss": 1.3084,
+ "step": 352
+ },
+ {
+ "epoch": 0.3530993091807071,
+ "grad_norm": 0.5603707432746887,
+ "learning_rate": 0.00019323162195819082,
+ "loss": 1.5732,
+ "step": 353
+ },
+ {
+ "epoch": 0.35409959050983086,
+ "grad_norm": 0.5732563138008118,
+ "learning_rate": 0.0001931936615503174,
+ "loss": 1.5045,
+ "step": 354
+ },
+ {
+ "epoch": 0.3550998718389547,
+ "grad_norm": 0.5997583866119385,
+ "learning_rate": 0.000193155598739464,
+ "loss": 1.4175,
+ "step": 355
+ },
+ {
+ "epoch": 0.3561001531680785,
+ "grad_norm": 0.5769765377044678,
+ "learning_rate": 0.0001931174335674547,
+ "loss": 1.4834,
+ "step": 356
+ },
+ {
+ "epoch": 0.35710043449720236,
+ "grad_norm": 0.5902683138847351,
+ "learning_rate": 0.0001930791660762262,
+ "loss": 1.4664,
+ "step": 357
+ },
+ {
+ "epoch": 0.35810071582632613,
+ "grad_norm": 0.6354758143424988,
+ "learning_rate": 0.00019304079630782752,
+ "loss": 1.3891,
+ "step": 358
+ },
+ {
+ "epoch": 0.35910099715544996,
+ "grad_norm": 0.6018317341804504,
+ "learning_rate": 0.0001930023243044201,
+ "loss": 1.4514,
+ "step": 359
+ },
+ {
+ "epoch": 0.3601012784845738,
+ "grad_norm": 0.5409123301506042,
+ "learning_rate": 0.00019296375010827773,
+ "loss": 1.4708,
+ "step": 360
+ },
+ {
+ "epoch": 0.3611015598136976,
+ "grad_norm": 0.5457523465156555,
+ "learning_rate": 0.00019292507376178643,
+ "loss": 1.4988,
+ "step": 361
+ },
+ {
+ "epoch": 0.3621018411428214,
+ "grad_norm": 0.626768946647644,
+ "learning_rate": 0.00019288629530744454,
+ "loss": 1.5722,
+ "step": 362
+ },
+ {
+ "epoch": 0.36310212247194523,
+ "grad_norm": 0.566554069519043,
+ "learning_rate": 0.0001928474147878626,
+ "loss": 1.2135,
+ "step": 363
+ },
+ {
+ "epoch": 0.36410240380106906,
+ "grad_norm": 0.7327786684036255,
+ "learning_rate": 0.0001928084322457632,
+ "loss": 1.5245,
+ "step": 364
+ },
+ {
+ "epoch": 0.3651026851301929,
+ "grad_norm": 0.5205698609352112,
+ "learning_rate": 0.00019276934772398114,
+ "loss": 1.2068,
+ "step": 365
+ },
+ {
+ "epoch": 0.36610296645931667,
+ "grad_norm": 1.0956753492355347,
+ "learning_rate": 0.00019273016126546323,
+ "loss": 1.5044,
+ "step": 366
+ },
+ {
+ "epoch": 0.3671032477884405,
+ "grad_norm": 0.6484043598175049,
+ "learning_rate": 0.00019269087291326833,
+ "loss": 1.6369,
+ "step": 367
+ },
+ {
+ "epoch": 0.36810352911756433,
+ "grad_norm": 0.6363429427146912,
+ "learning_rate": 0.00019265148271056722,
+ "loss": 1.4338,
+ "step": 368
+ },
+ {
+ "epoch": 0.3691038104466881,
+ "grad_norm": 0.6295244693756104,
+ "learning_rate": 0.0001926119907006426,
+ "loss": 1.4701,
+ "step": 369
+ },
+ {
+ "epoch": 0.37010409177581194,
+ "grad_norm": 0.6013259887695312,
+ "learning_rate": 0.00019257239692688907,
+ "loss": 1.7629,
+ "step": 370
+ },
+ {
+ "epoch": 0.37110437310493577,
+ "grad_norm": 0.6949493885040283,
+ "learning_rate": 0.00019253270143281296,
+ "loss": 1.6713,
+ "step": 371
+ },
+ {
+ "epoch": 0.3721046544340596,
+ "grad_norm": 0.6933801174163818,
+ "learning_rate": 0.00019249290426203252,
+ "loss": 1.6131,
+ "step": 372
+ },
+ {
+ "epoch": 0.3731049357631834,
+ "grad_norm": 0.5847527384757996,
+ "learning_rate": 0.0001924530054582776,
+ "loss": 1.3968,
+ "step": 373
+ },
+ {
+ "epoch": 0.3741052170923072,
+ "grad_norm": 0.6053057312965393,
+ "learning_rate": 0.0001924130050653898,
+ "loss": 1.3311,
+ "step": 374
+ },
+ {
+ "epoch": 0.37510549842143104,
+ "grad_norm": 0.5513793230056763,
+ "learning_rate": 0.00019237290312732226,
+ "loss": 1.5063,
+ "step": 375
+ },
+ {
+ "epoch": 0.37610577975055487,
+ "grad_norm": 0.5859197378158569,
+ "learning_rate": 0.00019233269968813984,
+ "loss": 1.3556,
+ "step": 376
+ },
+ {
+ "epoch": 0.37710606107967864,
+ "grad_norm": 0.5623495578765869,
+ "learning_rate": 0.00019229239479201876,
+ "loss": 1.3859,
+ "step": 377
+ },
+ {
+ "epoch": 0.3781063424088025,
+ "grad_norm": 0.602118968963623,
+ "learning_rate": 0.0001922519884832469,
+ "loss": 1.334,
+ "step": 378
+ },
+ {
+ "epoch": 0.3791066237379263,
+ "grad_norm": 0.5212380886077881,
+ "learning_rate": 0.0001922114808062234,
+ "loss": 1.401,
+ "step": 379
+ },
+ {
+ "epoch": 0.3801069050670501,
+ "grad_norm": 0.4969455599784851,
+ "learning_rate": 0.00019217087180545893,
+ "loss": 1.2292,
+ "step": 380
+ },
+ {
+ "epoch": 0.3811071863961739,
+ "grad_norm": 0.578629732131958,
+ "learning_rate": 0.0001921301615255754,
+ "loss": 1.5015,
+ "step": 381
+ },
+ {
+ "epoch": 0.38210746772529774,
+ "grad_norm": 0.593053936958313,
+ "learning_rate": 0.0001920893500113061,
+ "loss": 1.302,
+ "step": 382
+ },
+ {
+ "epoch": 0.3831077490544216,
+ "grad_norm": 0.5832563638687134,
+ "learning_rate": 0.00019204843730749547,
+ "loss": 1.3695,
+ "step": 383
+ },
+ {
+ "epoch": 0.38410803038354535,
+ "grad_norm": 0.5608510375022888,
+ "learning_rate": 0.00019200742345909915,
+ "loss": 1.3792,
+ "step": 384
+ },
+ {
+ "epoch": 0.3851083117126692,
+ "grad_norm": 0.5337334275245667,
+ "learning_rate": 0.00019196630851118398,
+ "loss": 1.4163,
+ "step": 385
+ },
+ {
+ "epoch": 0.386108593041793,
+ "grad_norm": 0.5460125803947449,
+ "learning_rate": 0.0001919250925089278,
+ "loss": 1.2439,
+ "step": 386
+ },
+ {
+ "epoch": 0.38710887437091684,
+ "grad_norm": 0.6217851638793945,
+ "learning_rate": 0.00019188377549761963,
+ "loss": 1.6428,
+ "step": 387
+ },
+ {
+ "epoch": 0.3881091557000406,
+ "grad_norm": 0.7154502868652344,
+ "learning_rate": 0.00019184235752265928,
+ "loss": 1.3468,
+ "step": 388
+ },
+ {
+ "epoch": 0.38910943702916445,
+ "grad_norm": 0.5044635534286499,
+ "learning_rate": 0.00019180083862955772,
+ "loss": 1.1877,
+ "step": 389
+ },
+ {
+ "epoch": 0.3901097183582883,
+ "grad_norm": 0.5755971074104309,
+ "learning_rate": 0.00019175921886393666,
+ "loss": 1.3475,
+ "step": 390
+ },
+ {
+ "epoch": 0.39110999968741206,
+ "grad_norm": 0.6121137738227844,
+ "learning_rate": 0.00019171749827152869,
+ "loss": 1.4342,
+ "step": 391
+ },
+ {
+ "epoch": 0.3921102810165359,
+ "grad_norm": 0.5615536570549011,
+ "learning_rate": 0.0001916756768981772,
+ "loss": 1.5471,
+ "step": 392
+ },
+ {
+ "epoch": 0.3931105623456597,
+ "grad_norm": 0.6527026295661926,
+ "learning_rate": 0.00019163375478983632,
+ "loss": 1.6363,
+ "step": 393
+ },
+ {
+ "epoch": 0.39411084367478355,
+ "grad_norm": 0.6465044617652893,
+ "learning_rate": 0.00019159173199257085,
+ "loss": 1.3823,
+ "step": 394
+ },
+ {
+ "epoch": 0.3951111250039073,
+ "grad_norm": 0.5620000958442688,
+ "learning_rate": 0.00019154960855255628,
+ "loss": 1.5418,
+ "step": 395
+ },
+ {
+ "epoch": 0.39611140633303116,
+ "grad_norm": 0.7090588808059692,
+ "learning_rate": 0.0001915073845160786,
+ "loss": 1.4593,
+ "step": 396
+ },
+ {
+ "epoch": 0.397111687662155,
+ "grad_norm": 0.6644489169120789,
+ "learning_rate": 0.00019146505992953446,
+ "loss": 1.4236,
+ "step": 397
+ },
+ {
+ "epoch": 0.3981119689912788,
+ "grad_norm": 0.6038135886192322,
+ "learning_rate": 0.00019142263483943085,
+ "loss": 1.1805,
+ "step": 398
+ },
+ {
+ "epoch": 0.3991122503204026,
+ "grad_norm": 0.6746726036071777,
+ "learning_rate": 0.00019138010929238534,
+ "loss": 1.5264,
+ "step": 399
+ },
+ {
+ "epoch": 0.4001125316495264,
+ "grad_norm": 0.5871374607086182,
+ "learning_rate": 0.00019133748333512575,
+ "loss": 1.3709,
+ "step": 400
+ },
+ {
+ "epoch": 0.40111281297865026,
+ "grad_norm": 0.5743412375450134,
+ "learning_rate": 0.00019129475701449035,
+ "loss": 1.4677,
+ "step": 401
+ },
+ {
+ "epoch": 0.4021130943077741,
+ "grad_norm": 0.6184396743774414,
+ "learning_rate": 0.0001912519303774276,
+ "loss": 1.4228,
+ "step": 402
+ },
+ {
+ "epoch": 0.40311337563689786,
+ "grad_norm": 0.5872434973716736,
+ "learning_rate": 0.0001912090034709963,
+ "loss": 1.3495,
+ "step": 403
+ },
+ {
+ "epoch": 0.4041136569660217,
+ "grad_norm": 0.6500155925750732,
+ "learning_rate": 0.00019116597634236525,
+ "loss": 1.4315,
+ "step": 404
+ },
+ {
+ "epoch": 0.4051139382951455,
+ "grad_norm": 0.5240740180015564,
+ "learning_rate": 0.0001911228490388136,
+ "loss": 1.4954,
+ "step": 405
+ },
+ {
+ "epoch": 0.4061142196242693,
+ "grad_norm": 0.5531806945800781,
+ "learning_rate": 0.00019107962160773035,
+ "loss": 1.3949,
+ "step": 406
+ },
+ {
+ "epoch": 0.40711450095339313,
+ "grad_norm": 0.5266262888908386,
+ "learning_rate": 0.0001910362940966147,
+ "loss": 1.2859,
+ "step": 407
+ },
+ {
+ "epoch": 0.40811478228251696,
+ "grad_norm": 0.5734869241714478,
+ "learning_rate": 0.00019099286655307568,
+ "loss": 1.2451,
+ "step": 408
+ },
+ {
+ "epoch": 0.4091150636116408,
+ "grad_norm": 0.5922874212265015,
+ "learning_rate": 0.0001909493390248324,
+ "loss": 1.5429,
+ "step": 409
+ },
+ {
+ "epoch": 0.41011534494076457,
+ "grad_norm": 0.542540431022644,
+ "learning_rate": 0.00019090571155971366,
+ "loss": 1.4138,
+ "step": 410
+ },
+ {
+ "epoch": 0.4111156262698884,
+ "grad_norm": 0.57356196641922,
+ "learning_rate": 0.00019086198420565823,
+ "loss": 1.2592,
+ "step": 411
+ },
+ {
+ "epoch": 0.41211590759901223,
+ "grad_norm": 0.6042733192443848,
+ "learning_rate": 0.00019081815701071445,
+ "loss": 1.5524,
+ "step": 412
+ },
+ {
+ "epoch": 0.41311618892813606,
+ "grad_norm": 0.46550241112709045,
+ "learning_rate": 0.0001907742300230406,
+ "loss": 1.308,
+ "step": 413
+ },
+ {
+ "epoch": 0.41411647025725984,
+ "grad_norm": 0.6283137798309326,
+ "learning_rate": 0.00019073020329090444,
+ "loss": 1.4753,
+ "step": 414
+ },
+ {
+ "epoch": 0.41511675158638367,
+ "grad_norm": 0.5254876613616943,
+ "learning_rate": 0.0001906860768626834,
+ "loss": 1.2157,
+ "step": 415
+ },
+ {
+ "epoch": 0.4161170329155075,
+ "grad_norm": 0.59089195728302,
+ "learning_rate": 0.00019064185078686443,
+ "loss": 1.2684,
+ "step": 416
+ },
+ {
+ "epoch": 0.4171173142446313,
+ "grad_norm": 0.7129126787185669,
+ "learning_rate": 0.000190597525112044,
+ "loss": 1.3974,
+ "step": 417
+ },
+ {
+ "epoch": 0.4181175955737551,
+ "grad_norm": 0.607305109500885,
+ "learning_rate": 0.000190553099886928,
+ "loss": 1.4312,
+ "step": 418
+ },
+ {
+ "epoch": 0.41911787690287894,
+ "grad_norm": 0.49921515583992004,
+ "learning_rate": 0.00019050857516033173,
+ "loss": 1.3469,
+ "step": 419
+ },
+ {
+ "epoch": 0.42011815823200277,
+ "grad_norm": 0.6167325377464294,
+ "learning_rate": 0.00019046395098117983,
+ "loss": 1.4723,
+ "step": 420
+ },
+ {
+ "epoch": 0.42111843956112655,
+ "grad_norm": 0.6144593358039856,
+ "learning_rate": 0.00019041922739850616,
+ "loss": 1.5502,
+ "step": 421
+ },
+ {
+ "epoch": 0.4221187208902504,
+ "grad_norm": 0.61333167552948,
+ "learning_rate": 0.00019037440446145385,
+ "loss": 1.3283,
+ "step": 422
+ },
+ {
+ "epoch": 0.4231190022193742,
+ "grad_norm": 0.5881702303886414,
+ "learning_rate": 0.00019032948221927524,
+ "loss": 1.4206,
+ "step": 423
+ },
+ {
+ "epoch": 0.42411928354849804,
+ "grad_norm": 0.5334322452545166,
+ "learning_rate": 0.00019028446072133175,
+ "loss": 1.4603,
+ "step": 424
+ },
+ {
+ "epoch": 0.4251195648776218,
+ "grad_norm": 0.5730605721473694,
+ "learning_rate": 0.00019023934001709383,
+ "loss": 1.4375,
+ "step": 425
+ },
+ {
+ "epoch": 0.42611984620674564,
+ "grad_norm": 0.6227820515632629,
+ "learning_rate": 0.00019019412015614098,
+ "loss": 1.4888,
+ "step": 426
+ },
+ {
+ "epoch": 0.4271201275358695,
+ "grad_norm": 0.5811313390731812,
+ "learning_rate": 0.00019014880118816164,
+ "loss": 1.3492,
+ "step": 427
+ },
+ {
+ "epoch": 0.4281204088649933,
+ "grad_norm": 0.5685800313949585,
+ "learning_rate": 0.0001901033831629532,
+ "loss": 1.5052,
+ "step": 428
+ },
+ {
+ "epoch": 0.4291206901941171,
+ "grad_norm": 0.5961394309997559,
+ "learning_rate": 0.00019005786613042185,
+ "loss": 1.3324,
+ "step": 429
+ },
+ {
+ "epoch": 0.4301209715232409,
+ "grad_norm": 0.5845314860343933,
+ "learning_rate": 0.00019001225014058255,
+ "loss": 1.5733,
+ "step": 430
+ },
+ {
+ "epoch": 0.43112125285236474,
+ "grad_norm": 0.5400176048278809,
+ "learning_rate": 0.00018996653524355902,
+ "loss": 1.3973,
+ "step": 431
+ },
+ {
+ "epoch": 0.4321215341814885,
+ "grad_norm": 0.5462201833724976,
+ "learning_rate": 0.00018992072148958368,
+ "loss": 1.2167,
+ "step": 432
+ },
+ {
+ "epoch": 0.43312181551061235,
+ "grad_norm": 0.6200360059738159,
+ "learning_rate": 0.00018987480892899758,
+ "loss": 1.5596,
+ "step": 433
+ },
+ {
+ "epoch": 0.4341220968397362,
+ "grad_norm": 0.5230718851089478,
+ "learning_rate": 0.00018982879761225027,
+ "loss": 1.3661,
+ "step": 434
+ },
+ {
+ "epoch": 0.43512237816886,
+ "grad_norm": 0.5868643522262573,
+ "learning_rate": 0.00018978268758989991,
+ "loss": 1.4792,
+ "step": 435
+ },
+ {
+ "epoch": 0.4361226594979838,
+ "grad_norm": 0.580892026424408,
+ "learning_rate": 0.00018973647891261307,
+ "loss": 1.3275,
+ "step": 436
+ },
+ {
+ "epoch": 0.4371229408271076,
+ "grad_norm": 0.5903263688087463,
+ "learning_rate": 0.00018969017163116472,
+ "loss": 1.4721,
+ "step": 437
+ },
+ {
+ "epoch": 0.43812322215623145,
+ "grad_norm": 0.5108968019485474,
+ "learning_rate": 0.0001896437657964382,
+ "loss": 1.3785,
+ "step": 438
+ },
+ {
+ "epoch": 0.4391235034853553,
+ "grad_norm": 0.6707500219345093,
+ "learning_rate": 0.00018959726145942508,
+ "loss": 1.5033,
+ "step": 439
+ },
+ {
+ "epoch": 0.44012378481447906,
+ "grad_norm": 0.5793184638023376,
+ "learning_rate": 0.00018955065867122528,
+ "loss": 1.3629,
+ "step": 440
+ },
+ {
+ "epoch": 0.4411240661436029,
+ "grad_norm": 0.5549041628837585,
+ "learning_rate": 0.00018950395748304678,
+ "loss": 1.5557,
+ "step": 441
+ },
+ {
+ "epoch": 0.4421243474727267,
+ "grad_norm": 0.5406919121742249,
+ "learning_rate": 0.0001894571579462058,
+ "loss": 1.4441,
+ "step": 442
+ },
+ {
+ "epoch": 0.4431246288018505,
+ "grad_norm": 0.5131089091300964,
+ "learning_rate": 0.00018941026011212654,
+ "loss": 1.3051,
+ "step": 443
+ },
+ {
+ "epoch": 0.4441249101309743,
+ "grad_norm": 0.601586639881134,
+ "learning_rate": 0.00018936326403234125,
+ "loss": 1.5297,
+ "step": 444
+ },
+ {
+ "epoch": 0.44512519146009816,
+ "grad_norm": 0.5036457180976868,
+ "learning_rate": 0.00018931616975849006,
+ "loss": 1.357,
+ "step": 445
+ },
+ {
+ "epoch": 0.446125472789222,
+ "grad_norm": 0.5471266508102417,
+ "learning_rate": 0.00018926897734232115,
+ "loss": 1.2176,
+ "step": 446
+ },
+ {
+ "epoch": 0.44712575411834576,
+ "grad_norm": 0.6057867407798767,
+ "learning_rate": 0.0001892216868356904,
+ "loss": 1.4763,
+ "step": 447
+ },
+ {
+ "epoch": 0.4481260354474696,
+ "grad_norm": 0.5384593605995178,
+ "learning_rate": 0.0001891742982905615,
+ "loss": 1.513,
+ "step": 448
+ },
+ {
+ "epoch": 0.4491263167765934,
+ "grad_norm": 0.6144880056381226,
+ "learning_rate": 0.00018912681175900598,
+ "loss": 1.5782,
+ "step": 449
+ },
+ {
+ "epoch": 0.45012659810571726,
+ "grad_norm": 0.4838174879550934,
+ "learning_rate": 0.00018907922729320285,
+ "loss": 1.4085,
+ "step": 450
+ },
+ {
+ "epoch": 0.45112687943484103,
+ "grad_norm": 0.6852928400039673,
+ "learning_rate": 0.00018903154494543889,
+ "loss": 1.5989,
+ "step": 451
+ },
+ {
+ "epoch": 0.45212716076396486,
+ "grad_norm": 0.47527411580085754,
+ "learning_rate": 0.00018898376476810834,
+ "loss": 1.3409,
+ "step": 452
+ },
+ {
+ "epoch": 0.4531274420930887,
+ "grad_norm": 0.5665884613990784,
+ "learning_rate": 0.00018893588681371303,
+ "loss": 1.5395,
+ "step": 453
+ },
+ {
+ "epoch": 0.45412772342221247,
+ "grad_norm": 0.5792158246040344,
+ "learning_rate": 0.00018888791113486213,
+ "loss": 1.516,
+ "step": 454
+ },
+ {
+ "epoch": 0.4551280047513363,
+ "grad_norm": 0.5223523378372192,
+ "learning_rate": 0.00018883983778427227,
+ "loss": 1.3678,
+ "step": 455
+ },
+ {
+ "epoch": 0.45612828608046013,
+ "grad_norm": 0.5927590131759644,
+ "learning_rate": 0.0001887916668147673,
+ "loss": 1.3617,
+ "step": 456
+ },
+ {
+ "epoch": 0.45712856740958396,
+ "grad_norm": 0.7266496419906616,
+ "learning_rate": 0.00018874339827927846,
+ "loss": 1.3734,
+ "step": 457
+ },
+ {
+ "epoch": 0.45812884873870774,
+ "grad_norm": 0.6495805978775024,
+ "learning_rate": 0.00018869503223084414,
+ "loss": 1.5282,
+ "step": 458
+ },
+ {
+ "epoch": 0.45912913006783157,
+ "grad_norm": 0.6099816560745239,
+ "learning_rate": 0.00018864656872260985,
+ "loss": 1.4691,
+ "step": 459
+ },
+ {
+ "epoch": 0.4601294113969554,
+ "grad_norm": 0.5208227038383484,
+ "learning_rate": 0.00018859800780782828,
+ "loss": 1.3949,
+ "step": 460
+ },
+ {
+ "epoch": 0.46112969272607923,
+ "grad_norm": 0.5526600480079651,
+ "learning_rate": 0.000188549349539859,
+ "loss": 1.3557,
+ "step": 461
+ },
+ {
+ "epoch": 0.462129974055203,
+ "grad_norm": 0.5537740588188171,
+ "learning_rate": 0.00018850059397216876,
+ "loss": 1.4703,
+ "step": 462
+ },
+ {
+ "epoch": 0.46313025538432684,
+ "grad_norm": 0.5553976893424988,
+ "learning_rate": 0.00018845174115833099,
+ "loss": 1.4356,
+ "step": 463
+ },
+ {
+ "epoch": 0.46413053671345067,
+ "grad_norm": 0.6027779579162598,
+ "learning_rate": 0.0001884027911520262,
+ "loss": 1.4763,
+ "step": 464
+ },
+ {
+ "epoch": 0.4651308180425745,
+ "grad_norm": 0.5559154748916626,
+ "learning_rate": 0.00018835374400704154,
+ "loss": 1.4148,
+ "step": 465
+ },
+ {
+ "epoch": 0.4661310993716983,
+ "grad_norm": 0.6124109029769897,
+ "learning_rate": 0.00018830459977727096,
+ "loss": 1.4468,
+ "step": 466
+ },
+ {
+ "epoch": 0.4671313807008221,
+ "grad_norm": 0.4762580692768097,
+ "learning_rate": 0.0001882553585167151,
+ "loss": 1.3714,
+ "step": 467
+ },
+ {
+ "epoch": 0.46813166202994594,
+ "grad_norm": 0.5793487429618835,
+ "learning_rate": 0.00018820602027948114,
+ "loss": 1.4828,
+ "step": 468
+ },
+ {
+ "epoch": 0.4691319433590697,
+ "grad_norm": 0.55177241563797,
+ "learning_rate": 0.00018815658511978298,
+ "loss": 1.4157,
+ "step": 469
+ },
+ {
+ "epoch": 0.47013222468819355,
+ "grad_norm": 0.5065292716026306,
+ "learning_rate": 0.00018810705309194083,
+ "loss": 1.4519,
+ "step": 470
+ },
+ {
+ "epoch": 0.4711325060173174,
+ "grad_norm": 0.5401413440704346,
+ "learning_rate": 0.00018805742425038145,
+ "loss": 1.4344,
+ "step": 471
+ },
+ {
+ "epoch": 0.4721327873464412,
+ "grad_norm": 0.7173880338668823,
+ "learning_rate": 0.00018800769864963802,
+ "loss": 1.7325,
+ "step": 472
+ },
+ {
+ "epoch": 0.473133068675565,
+ "grad_norm": 0.507682204246521,
+ "learning_rate": 0.00018795787634434994,
+ "loss": 1.37,
+ "step": 473
+ },
+ {
+ "epoch": 0.4741333500046888,
+ "grad_norm": 0.551888644695282,
+ "learning_rate": 0.0001879079573892629,
+ "loss": 1.3695,
+ "step": 474
+ },
+ {
+ "epoch": 0.47513363133381264,
+ "grad_norm": 0.5109260082244873,
+ "learning_rate": 0.00018785794183922883,
+ "loss": 1.4001,
+ "step": 475
+ },
+ {
+ "epoch": 0.4761339126629365,
+ "grad_norm": 0.4565551280975342,
+ "learning_rate": 0.00018780782974920572,
+ "loss": 1.1752,
+ "step": 476
+ },
+ {
+ "epoch": 0.47713419399206025,
+ "grad_norm": 0.5651509761810303,
+ "learning_rate": 0.00018775762117425777,
+ "loss": 1.4291,
+ "step": 477
+ },
+ {
+ "epoch": 0.4781344753211841,
+ "grad_norm": 0.5827792286872864,
+ "learning_rate": 0.0001877073161695551,
+ "loss": 1.3438,
+ "step": 478
+ },
+ {
+ "epoch": 0.4791347566503079,
+ "grad_norm": 0.5719752907752991,
+ "learning_rate": 0.00018765691479037376,
+ "loss": 1.4683,
+ "step": 479
+ },
+ {
+ "epoch": 0.4801350379794317,
+ "grad_norm": 0.5153111815452576,
+ "learning_rate": 0.00018760641709209583,
+ "loss": 1.4392,
+ "step": 480
+ },
+ {
+ "epoch": 0.4811353193085555,
+ "grad_norm": 0.5455904603004456,
+ "learning_rate": 0.0001875558231302091,
+ "loss": 1.1603,
+ "step": 481
+ },
+ {
+ "epoch": 0.48213560063767935,
+ "grad_norm": 0.5857074856758118,
+ "learning_rate": 0.00018750513296030718,
+ "loss": 1.3099,
+ "step": 482
+ },
+ {
+ "epoch": 0.4831358819668032,
+ "grad_norm": 0.6051676273345947,
+ "learning_rate": 0.00018745434663808942,
+ "loss": 1.3587,
+ "step": 483
+ },
+ {
+ "epoch": 0.48413616329592696,
+ "grad_norm": 0.588749885559082,
+ "learning_rate": 0.0001874034642193608,
+ "loss": 1.5277,
+ "step": 484
+ },
+ {
+ "epoch": 0.4851364446250508,
+ "grad_norm": 0.5295410752296448,
+ "learning_rate": 0.0001873524857600319,
+ "loss": 1.2084,
+ "step": 485
+ },
+ {
+ "epoch": 0.4861367259541746,
+ "grad_norm": 0.5313368439674377,
+ "learning_rate": 0.00018730141131611882,
+ "loss": 1.4002,
+ "step": 486
+ },
+ {
+ "epoch": 0.48713700728329845,
+ "grad_norm": 0.5166353583335876,
+ "learning_rate": 0.00018725024094374315,
+ "loss": 1.208,
+ "step": 487
+ },
+ {
+ "epoch": 0.4881372886124222,
+ "grad_norm": 0.5478363037109375,
+ "learning_rate": 0.00018719897469913184,
+ "loss": 1.3236,
+ "step": 488
+ },
+ {
+ "epoch": 0.48913756994154606,
+ "grad_norm": 0.5531913042068481,
+ "learning_rate": 0.00018714761263861728,
+ "loss": 1.4938,
+ "step": 489
+ },
+ {
+ "epoch": 0.4901378512706699,
+ "grad_norm": 0.5334530472755432,
+ "learning_rate": 0.000187096154818637,
+ "loss": 1.4172,
+ "step": 490
+ },
+ {
+ "epoch": 0.4911381325997937,
+ "grad_norm": 0.5667001605033875,
+ "learning_rate": 0.00018704460129573391,
+ "loss": 1.3517,
+ "step": 491
+ },
+ {
+ "epoch": 0.4921384139289175,
+ "grad_norm": 0.5568780303001404,
+ "learning_rate": 0.00018699295212655596,
+ "loss": 1.4287,
+ "step": 492
+ },
+ {
+ "epoch": 0.4931386952580413,
+ "grad_norm": 0.6663610935211182,
+ "learning_rate": 0.00018694120736785632,
+ "loss": 1.5416,
+ "step": 493
+ },
+ {
+ "epoch": 0.49413897658716516,
+ "grad_norm": 0.5753045082092285,
+ "learning_rate": 0.00018688936707649304,
+ "loss": 1.5552,
+ "step": 494
+ },
+ {
+ "epoch": 0.49513925791628893,
+ "grad_norm": 0.5707410573959351,
+ "learning_rate": 0.00018683743130942928,
+ "loss": 1.5332,
+ "step": 495
+ },
+ {
+ "epoch": 0.49613953924541276,
+ "grad_norm": 0.5847951173782349,
+ "learning_rate": 0.00018678540012373302,
+ "loss": 1.3488,
+ "step": 496
+ },
+ {
+ "epoch": 0.4971398205745366,
+ "grad_norm": 0.60503751039505,
+ "learning_rate": 0.00018673327357657715,
+ "loss": 1.3924,
+ "step": 497
+ },
+ {
+ "epoch": 0.4981401019036604,
+ "grad_norm": 0.635142982006073,
+ "learning_rate": 0.0001866810517252393,
+ "loss": 1.4392,
+ "step": 498
+ },
+ {
+ "epoch": 0.4991403832327842,
+ "grad_norm": 0.5536782741546631,
+ "learning_rate": 0.00018662873462710184,
+ "loss": 1.286,
+ "step": 499
+ },
+ {
+ "epoch": 0.5001406645619081,
+ "grad_norm": 0.5676659345626831,
+ "learning_rate": 0.0001865763223396518,
+ "loss": 1.3006,
+ "step": 500
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 2997,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3.234921192731443e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-500/training_args.bin b/checkpoint-500/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f5db54c03213459099040d14f85829b6aeb0666
--- /dev/null
+++ b/checkpoint-500/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb43d78443117126e44061cb7a0c1f9a5c40f27f7bf1d5cd0232587a4334407
+size 5304
diff --git a/runs/Jul25_08-21-52_65c2ef5bfb68/events.out.tfevents.1721896565.65c2ef5bfb68.573.0 b/runs/Jul25_08-21-52_65c2ef5bfb68/events.out.tfevents.1721896565.65c2ef5bfb68.573.0
new file mode 100644
index 0000000000000000000000000000000000000000..ee9d001796ee47bc0656800aabb3776ad09c2fa7
--- /dev/null
+++ b/runs/Jul25_08-21-52_65c2ef5bfb68/events.out.tfevents.1721896565.65c2ef5bfb68.573.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d3d25684ffc5e057f26c07c332a4465d6cba8de1696661d481edd2ed3d864ee
+size 637462