from huggingface_hub import upload_folder

# Upload the local project directory to the Hugging Face Hub.
# Assumes you are already authenticated (e.g. via `huggingface-cli login` or the HF_TOKEN env var).
upload_folder(
    folder_path="/home/yiren/new_ssd2/chunhui/yaning/project/mission-impossible-language-models",
    repo_id="Yaning1001/impossible_llm",
    path_in_repo="mission-impossible-language-models",
)
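# Optional sanity check (a minimal sketch, assuming the upload above succeeded and the repo is
# readable with the current credentials): list the repo's files and confirm the folder landed
# under the expected path_in_repo prefix.
from huggingface_hub import HfApi

repo_files = HfApi().list_repo_files(repo_id="Yaning1001/impossible_llm")
print([f for f in repo_files if f.startswith("mission-impossible-language-models/")])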
# import torch
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# model_id_1 = "meta-llama/Llama-3.2-3B"
# model_id_2 = "Qwen/Qwen2.5-7B"
#
# # Check your authentication - these calls should succeed without errors.
# tokenizer_0 = AutoTokenizer.from_pretrained('gpt2')
# tokenizer_1 = AutoTokenizer.from_pretrained(model_id_1)
# tokenizer_2 = AutoTokenizer.from_pretrained(model_id_2)
#
# # Copy the Llama pad token onto the Qwen tokenizer, then inspect each pad token's type.
# tokenizer_2.pad_token = tokenizer_1.pad_token
# print("tokenizer_0.pad_token:", type(tokenizer_0.pad_token))
# print("tokenizer_1.pad_token:", type(tokenizer_1.pad_token))
# print("tokenizer_2.pad_token:", type(tokenizer_2.pad_token))