xlsr_ur_training / vocab.json
{
"'": 25,
"[PAD]": 54,
"[UNK]": 53,
"|": 9,
"؟": 36,
"ء": 16,
"آ": 40,
"ؤ": 46,
"ئ": 27,
"ا": 44,
"ب": 17,
"ت": 50,
"ث": 20,
"ج": 51,
"ح": 31,
"خ": 7,
"د": 33,
"ذ": 28,
"ر": 10,
"ز": 14,
"س": 2,
"ش": 18,
"ص": 13,
"ض": 19,
"ط": 34,
"ظ": 22,
"ع": 32,
"غ": 37,
"ف": 8,
"ق": 42,
"ل": 5,
"م": 29,
"ن": 4,
"و": 49,
"ي": 38,
"ُ": 21,
"ِ": 1,
"ّ": 15,
"ٔ": 11,
"ٰ": 39,
"ٹ": 47,
"پ": 30,
"چ": 43,
"ڈ": 6,
"ڑ": 23,
"ژ": 12,
"ک": 0,
"گ": 52,
"ں": 24,
"ھ": 48,
"ہ": 3,
"ۂ": 45,
"ی": 35,
"ے": 41,
"۔": 26
}
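
The mapping above follows the character-level vocabulary format used by transformers' Wav2Vec2CTCTokenizer: 55 entries with IDs 0-54, "|" as the word delimiter, and [UNK]/[PAD] as special tokens. Below is a minimal sketch of loading it, assuming the file is saved locally as vocab.json; the token arguments mirror the entries above, but the exact training setup is an assumption, not something this file confirms.

```python
# Minimal sketch: load this vocab for CTC fine-tuning (assumes the
# standard Wav2Vec2 character-level setup; not confirmed by this repo).
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",              # this file, saved locally (assumed path)
    unk_token="[UNK]",         # id 53 above
    pad_token="[PAD]",         # id 54 above; also used as the CTC blank
    word_delimiter_token="|",  # id 9 above
)

# Example: an Urdu word is tokenized character by character.
ids = tokenizer("پاکستان").input_ids
print(ids, tokenizer.decode(ids))
```

Each character maps directly to its integer ID from the table, so the word above becomes the sequence [30, 44, 0, 2, 50, 44, 4]; decoding reverses the lookup and rejoins the characters.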