import os

import torch
import transformers

# Maximum tokenized sequence length for a tweet
MAX_LEN = 150  # 256
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 4
EPOCHS = 5

# Folder to contain all the datasets
DATASET_LOCATION = ""
# MODEL_PATH = "https://huggingface.co/thak123/bert-emoji-latvian-twitter-classifier/resolve/main/model.bin"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 7 EPOCH Version
BERT_PATH = "FFZG-cleopatra/bert-emoji-latvian-twitter"

# TODO: check if lower casing is required
# BertTokenizer
TOKENIZER = transformers.BertTokenizer.from_pretrained(
    BERT_PATH,
    do_lower_case=True
)

####################################################################################################################################
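# Illustrative sketch (not part of the original config): how TOKENIZER and
# MAX_LEN defined above are typically consumed when preparing a single tweet
# for the model. `sample_tweet` is a hypothetical input; the project's real
# dataset/loader code lives elsewhere.
if __name__ == "__main__":
    sample_tweet = "Labdien! :)"  # hypothetical Latvian tweet
    encoded = TOKENIZER.encode_plus(
        sample_tweet,
        add_special_tokens=True,
        max_length=MAX_LEN,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    # Both tensors are padded/truncated to MAX_LEN, i.e. shape [1, MAX_LEN]
    print(encoded["input_ids"].shape)
    print(encoded["attention_mask"].shape)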