xls-r_cv_ur / vocab.json
{
" ": 6,
"[PAD]": 44,
"[UNK]": 43,
"آ": 35,
"أ": 0,
"ؤ": 24,
"ئ": 11,
"ا": 9,
"ب": 31,
"ت": 39,
"ث": 12,
"ج": 10,
"ح": 16,
"خ": 28,
"د": 7,
"ذ": 19,
"ر": 42,
"ز": 5,
"س": 8,
"ش": 18,
"ص": 4,
"ض": 14,
"ط": 37,
"ظ": 13,
"ع": 25,
"غ": 38,
"ف": 3,
"ق": 21,
"ل": 33,
"م": 26,
"ن": 1,
"و": 40,
"ً": 29,
"ٹ": 15,
"پ": 20,
"چ": 27,
"ڈ": 41,
"ڑ": 36,
"ک": 32,
"گ": 34,
"ں": 30,
"ھ": 22,
"ہ": 2,
"ی": 23,
"ے": 17
}
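The map above is a character-level CTC vocabulary: 45 entries covering the Urdu alphabet (Common Voice "ur", per the repo name) plus a literal space (id 6), "[UNK]" (id 43), and "[PAD]" (id 44). Below is a minimal sketch, not part of the repository, of how such a file is typically consumed, assuming the standard Hugging Face transformers Wav2Vec2CTCTokenizer and a local copy of vocab.json in the working directory.

```python
# Hypothetical usage sketch: build a CTC character tokenizer from the
# vocab.json above. Token names mirror the file; everything else
# (paths, the sample word) is an assumption for illustration.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",              # the 45-entry character map above
    unk_token="[UNK]",         # id 43 in this vocab
    pad_token="[PAD]",         # id 44; wav2vec2 also uses it as the CTC blank
    word_delimiter_token=" ",  # this vocab keeps a literal space (id 6)
)

print(tokenizer.vocab_size)          # 45
print(tokenizer("سلام").input_ids)   # character ids, e.g. [8, 33, 9, 26]
```

Note that this vocab uses a plain space as the word delimiter rather than the tokenizer's default "|", so `word_delimiter_token=" "` has to be passed explicitly for round-trip decoding to restore word boundaries.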