import pathlib
import random

import gradio as gr

from src import HindiTokenizer, BasicTokenizer

# Instantiate both tokenizers; the Hindi tokenizer is restored from a saved vocabulary file.
Basic = BasicTokenizer()
Basic._build_vocab()

Hindi = HindiTokenizer()
Hindi.load(
    model_file_path=pathlib.Path(
        "saved_vocabs/batch_1_Hindi_Tokenizer-test-all_batches-100_000_batchsize-initial_vocab_size_5000.model"))


def tokenize_and_color(text, tokenizer_choice="HindiTokenizer"):
    """Encode `text` with the selected tokenizer and colour each token for display."""
    if tokenizer_choice == "BasicTokenizer":
        tokenizer = Basic
    else:
        tokenizer = Hindi

    tokens = tokenizer.encode(text)

    # Palette of visually distinct background colours used to highlight individual tokens.
    colors = [
        "#33FF57", "#3357FF", "#F333FF",
        "#33FFF3", "#FF3380", "#3380FF",
        "#FF8333", "#7FDBFF", "#0074D9",
        "#39CCCC", "#3D9970", "#2ECC40",
        "#FFDC00", "#FF851B", "#0000FF",
        "#85144b", "#F012BE", "#B10DC9",
        "#AAAAAA", "#DDDDDD", "#800000",
        "#808000", "#008080", "#800080",
        "#FFC0CB", "#000080", "#FFD700",
        "#FF69B4", "#8A2BE2", "#5F9EA0"
    ]

    colored_text = '