|
[INFO|configuration_utils.py:672] 2024-10-17 07:49:30,560 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json |
|
|
|
[INFO|configuration_utils.py:739] 2024-10-17 07:49:30,563 >> Model config Gemma2Config {
  "_name_or_path": "unsloth/gemma-2-2b-it",
  "architectures": [
    "Gemma2ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "attn_logit_softcapping": 50.0,
  "bos_token_id": 2,
  "cache_implementation": "hybrid",
  "eos_token_id": [
    1,
    107
  ],
  "final_logit_softcapping": 30.0,
  "head_dim": 256,
  "hidden_act": "gelu_pytorch_tanh",
  "hidden_activation": "gelu_pytorch_tanh",
  "hidden_size": 2304,
  "initializer_range": 0.02,
  "intermediate_size": 9216,
  "max_position_embeddings": 8192,
  "model_type": "gemma2",
  "num_attention_heads": 8,
  "num_hidden_layers": 26,
  "num_key_value_heads": 4,
  "pad_token_id": 0,
  "query_pre_attn_scalar": 256,
  "rms_norm_eps": 1e-06,
  "rope_theta": 10000.0,
  "sliding_window": 4096,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.45.0",
  "unsloth_version": "2024.9",
  "use_cache": true,
  "vocab_size": 256000
}
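A note on the attention shapes implied by this config: Gemma 2 sets head_dim explicitly, so with num_attention_heads = 8 and head_dim = 256 the query projection maps hidden_size = 2304 to 8 × 256 = 2048, while num_key_value_heads = 4 gives key/value projections of 4 × 256 = 1024 (grouped-query attention). A minimal sketch that loads the same config and checks this arithmetic (assumes transformers is installed and the hub checkpoint is reachable):

```python
from transformers import AutoConfig

# Fetch the same config.json the log shows being read from the hub cache.
cfg = AutoConfig.from_pretrained("unsloth/gemma-2-2b-it")

q_out = cfg.num_attention_heads * cfg.head_dim   # 8 * 256 = 2048
kv_out = cfg.num_key_value_heads * cfg.head_dim  # 4 * 256 = 1024

# head_dim is explicit here, so it need not equal hidden_size // num_heads.
print(f"q_proj: {cfg.hidden_size} -> {q_out}, k/v_proj: {cfg.hidden_size} -> {kv_out}")
```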
|
|
|
|
|
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:30,971 >> loading file tokenizer.model from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/tokenizer.model
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:30,971 >> loading file tokenizer.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/tokenizer.json
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:30,971 >> loading file added_tokens.json from cache at None
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:30,971 >> loading file special_tokens_map.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/special_tokens_map.json
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:30,971 >> loading file tokenizer_config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/tokenizer_config.json
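The five files above are the standard tokenizer artifacts; "from cache at None" for added_tokens.json simply means the repo ships no such file. A minimal sketch of the equivalent load (assuming transformers is installed):

```python
from transformers import AutoTokenizer

# Loads tokenizer.model / tokenizer.json and the config files, as in the log above.
tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-2-2b-it")
print(tokenizer.vocab_size)  # 256000, matching "vocab_size" in the config dump
```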
|
|
|
[INFO|configuration_utils.py:672] 2024-10-17 07:49:33,042 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 07:49:33,043 >> Model config Gemma2Config { … } (identical to the dump above; omitted)

[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:33,316 >> loading file tokenizer.model from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/tokenizer.model
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:33,316 >> loading file tokenizer.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/tokenizer.json
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:33,317 >> loading file added_tokens.json from cache at None
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:33,317 >> loading file special_tokens_map.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/special_tokens_map.json
[INFO|tokenization_utils_base.py:2214] 2024-10-17 07:49:33,317 >> loading file tokenizer_config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/tokenizer_config.json

[INFO|configuration_utils.py:672] 2024-10-17 07:49:43,349 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 07:49:43,350 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
|
|
|
|
|
[INFO|modeling_utils.py:3726] 2024-10-17 07:49:43,408 >> loading weights file model.safetensors from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/model.safetensors
[INFO|modeling_utils.py:1622] 2024-10-17 07:49:52,501 >> Instantiating Gemma2ForCausalLM model under default dtype torch.bfloat16.
[INFO|configuration_utils.py:1099] 2024-10-17 07:49:52,505 >> Generate config GenerationConfig {
  "bos_token_id": 2,
  "cache_implementation": "hybrid",
  "eos_token_id": [
    1,
    107
  ],
  "pad_token_id": 0
}
|
|
|
|
|
[INFO|modeling_utils.py:4568] 2024-10-17 07:52:51,954 >> All model checkpoint weights were used when initializing Gemma2ForCausalLM.

[INFO|modeling_utils.py:4576] 2024-10-17 07:52:51,954 >> All the weights of Gemma2ForCausalLM were initialized from the model checkpoint at unsloth/gemma-2-2b-it.
If your task is similar to the task the model of the checkpoint was trained on, you can already use Gemma2ForCausalLM for predictions without further training.
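The pair of messages above is what a clean from_pretrained load prints: every tensor in model.safetensors was consumed and nothing was randomly initialized. A minimal sketch of the equivalent bf16 load (assumes transformers and torch are installed):

```python
import torch
from transformers import AutoModelForCausalLM

# "Instantiating ... under default dtype torch.bfloat16" corresponds to honoring
# the checkpoint's stored "torch_dtype": "bfloat16" from config.json.
model = AutoModelForCausalLM.from_pretrained(
    "unsloth/gemma-2-2b-it",
    torch_dtype=torch.bfloat16,
)
```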
|
|
|
[INFO|configuration_utils.py:1054] 2024-10-17 07:52:52,225 >> loading configuration file generation_config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/generation_config.json
[INFO|configuration_utils.py:1099] 2024-10-17 07:52:52,225 >> Generate config GenerationConfig {
  "bos_token_id": 2,
  "cache_implementation": "hybrid",
  "eos_token_id": [
    1,
    107
  ],
  "max_length": 8192,
  "pad_token_id": 0
}
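Note that eos_token_id is a list: generation stops at whichever id is produced first (in Gemma's vocabulary these should be <eos> and <end_of_turn>, though the log does not decode them). A short sketch continuing from the model and tokenizer loaded above:

```python
inputs = tokenizer("Hello", return_tensors="pt")
# eos_token_id accepts a list of stop ids; either one ends generation.
output = model.generate(**inputs, max_new_tokens=32, eos_token_id=[1, 107])
print(tokenizer.decode(output[0], skip_special_tokens=True))
```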
|
|
|
|
|
[INFO|trainer.py:667] 2024-10-17 07:52:52,594 >> Using auto half precision backend

[INFO|trainer.py:2243] 2024-10-17 07:52:53,383 >> ***** Running training *****
[INFO|trainer.py:2244] 2024-10-17 07:52:53,383 >> Num examples = 4,244
[INFO|trainer.py:2245] 2024-10-17 07:52:53,383 >> Num Epochs = 6
[INFO|trainer.py:2246] 2024-10-17 07:52:53,383 >> Instantaneous batch size per device = 2
[INFO|trainer.py:2249] 2024-10-17 07:52:53,383 >> Total train batch size (w. parallel, distributed & accumulation) = 32
[INFO|trainer.py:2250] 2024-10-17 07:52:53,383 >> Gradient Accumulation steps = 8
[INFO|trainer.py:2251] 2024-10-17 07:52:53,383 >> Total optimization steps = 792
[INFO|trainer.py:2252] 2024-10-17 07:52:53,386 >> Number of trainable parameters = 10,383,360
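Both totals can be reproduced from the other numbers. A hedged back-of-envelope sketch: the 2-GPU world size and a LoRA rank of 8 over all seven linear projections are inferences that happen to reproduce the logged figures exactly, not values the log prints:

```python
# Optimization steps: 32 total / (2 per device * 8 accumulation) implies 2 devices.
examples, epochs = 4244, 6
per_device, grad_accum, devices = 2, 8, 2

micro_batches = -(-examples // (per_device * devices))  # ceil(4244 / 4) = 1061
steps_per_epoch = micro_batches // grad_accum           # 1061 // 8 = 132
print(steps_per_epoch * epochs)                         # 792, as logged

# Trainable parameters: LoRA adds A (in x r) and B (r x out) per projection.
hidden, inter, layers, r = 2304, 9216, 26, 8
q_out, kv_out = 8 * 256, 4 * 256                 # heads * head_dim from the config
per_layer = r * (
    (hidden + q_out)          # q_proj
    + 2 * (hidden + kv_out)   # k_proj, v_proj
    + (q_out + hidden)        # o_proj
    + 3 * (hidden + inter)    # gate_proj, up_proj, down_proj
)
print(per_layer * layers)                        # 10383360, as logged
```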
|
|
|
[INFO|trainer.py:3705] 2024-10-17 07:58:27,005 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-100
[INFO|configuration_utils.py:672] 2024-10-17 07:58:27,602 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 07:58:27,603 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 07:58:27,704 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-100/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 07:58:27,704 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-100/special_tokens_map.json
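Each checkpoint-N directory holds the LoRA adapter weights plus the tokenizer files just listed, so an interrupted run can pick up from it. A hypothetical sketch (trainer stands for the Trainer instance behind this log, which the log itself never shows):

```python
# Resume the run from the 100-step checkpoint saved above.
trainer.train(
    resume_from_checkpoint="saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-100"
)
```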
|
|
|
[INFO|trainer.py:3705] 2024-10-17 08:03:56,426 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-200
[INFO|configuration_utils.py:672] 2024-10-17 08:04:00,522 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:04:00,523 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:04:00,623 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-200/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:04:00,623 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-200/special_tokens_map.json

[INFO|trainer.py:3705] 2024-10-17 08:09:34,776 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-300
[INFO|configuration_utils.py:672] 2024-10-17 08:09:35,375 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:09:35,376 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:09:35,447 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-300/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:09:35,448 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-300/special_tokens_map.json

[INFO|trainer.py:3705] 2024-10-17 08:15:13,571 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-400
[INFO|configuration_utils.py:672] 2024-10-17 08:15:15,143 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:15:15,145 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:15:15,245 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-400/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:15:15,245 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-400/special_tokens_map.json

[INFO|trainer.py:3705] 2024-10-17 08:20:53,112 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-500
[INFO|configuration_utils.py:672] 2024-10-17 08:20:53,739 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:20:53,741 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:20:53,840 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-500/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:20:53,840 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-500/special_tokens_map.json

[INFO|trainer.py:3705] 2024-10-17 08:26:23,393 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-600
[INFO|configuration_utils.py:672] 2024-10-17 08:26:25,621 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:26:25,622 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:26:25,722 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-600/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:26:25,722 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-600/special_tokens_map.json

[INFO|trainer.py:3705] 2024-10-17 08:31:53,889 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-700
[INFO|configuration_utils.py:672] 2024-10-17 08:31:54,915 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:31:54,917 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:31:55,024 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-700/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:31:55,024 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-700/special_tokens_map.json

[INFO|trainer.py:3705] 2024-10-17 08:37:04,811 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-792
[INFO|configuration_utils.py:672] 2024-10-17 08:37:05,459 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:37:05,460 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:37:05,563 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-792/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:37:05,566 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/checkpoint-792/special_tokens_map.json
|
|
|
[INFO|trainer.py:2505] 2024-10-17 08:37:06,016 >>

Training completed. Do not forget to share your model on huggingface.co/models =)

[INFO|trainer.py:3705] 2024-10-17 08:37:06,018 >> Saving model checkpoint to saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45
|
|
|
[INFO|configuration_utils.py:672] 2024-10-17 08:37:07,036 >> loading configuration file config.json from cache at /home/.cache/huggingface/hub/models--unsloth--gemma-2-2b-it/snapshots/457f2e15bf550c227ce6ad86e2ec108d3e42c106/config.json
[INFO|configuration_utils.py:739] 2024-10-17 08:37:07,036 >> Model config Gemma2Config { … } (identical to the dump above; omitted)
[INFO|tokenization_utils_base.py:2649] 2024-10-17 08:37:07,112 >> tokenizer config file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/tokenizer_config.json
[INFO|tokenization_utils_base.py:2658] 2024-10-17 08:37:07,112 >> Special tokens file saved in saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45/special_tokens_map.json
|
|
|
[INFO|modelcard.py:449] 2024-10-17 08:37:07,533 >> Dropping the following result as it does not have all the necessary fields:
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
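The final save directory contains a LoRA adapter rather than merged full weights. A minimal sketch of loading it for inference with peft (assumes peft and transformers are installed; merge_and_unload is optional and folds the adapter deltas into the base weights):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_dir = "saves/Gemma-2B/lora/4k_train_2024-10-17-07-48-45"

base = AutoModelForCausalLM.from_pretrained(
    "unsloth/gemma-2-2b-it", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, adapter_dir)
model = model.merge_and_unload()  # optional: bake the LoRA weights into the base
tokenizer = AutoTokenizer.from_pretrained(adapter_dir)
```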
|
|
|
|