{
"[PAD]": 65,
"[UNK]": 64,
"|": 0,
"،": 1,
"؟": 2,
"ء": 3,
"آ": 4,
"أ": 5,
"ؤ": 6,
"ئ": 7,
"ا": 8,
"ب": 9,
"ت": 10,
"ث": 11,
"ج": 12,
"ح": 13,
"خ": 14,
"د": 15,
"ذ": 16,
"ر": 17,
"ز": 18,
"س": 19,
"ش": 20,
"ص": 21,
"ض": 22,
"ط": 23,
"ظ": 24,
"ع": 25,
"غ": 26,
"ف": 27,
"ق": 28,
"ل": 29,
"م": 30,
"ن": 31,
"و": 32,
"ى": 33,
"ي": 34,
"ً": 35,
"َ": 36,
"ُ": 37,
"ِ": 38,
"ّ": 39,
"ٓ": 40,
"ٔ": 41,
"ٰ": 42,
"ٹ": 43,
"پ": 44,
"چ": 45,
"ڈ": 46,
"ڑ": 47,
"ژ": 48,
"ک": 49,
"گ": 50,
"ں": 51,
"ھ": 52,
"ہ": 53,
"ۂ": 54,
"ۃ": 55,
"ی": 56,
"ے": 57,
"ۓ": 58,
"۔": 59,
"’": 60,
"…": 61,
"ﷲ": 62,
"ﷺ": 63
}