"""
1. jd_vocab_tokens的中文:



2. 中文标点


3. 全中文(单字) unicode


4. 全中文()
词典大小:46145。其中 中文汉字数:{'total': 25359, '中文单字': 5089, '中文多字': 20270}, 中文标点数: 266
"""

from collections import Counter

from transformers import AutoTokenizer
from zhon.hanzi import punctuation as zh_punc

from data_sample.oov_base import jd_vocab_tokens
from utils.text_util import is_chinese, has_chinese

tokenizer = AutoTokenizer.from_pretrained("tokenizer", trust_remote_code=True)
# Alternative: load a raw tokenizers-library tokenizer instead
# (requires `from tokenizers import Tokenizer`):
# tokenizer = Tokenizer.from_file("../gpt_neox_chinese/20B_tokenizer_chinese.json")
vocab = tokenizer.get_vocab()  # token string -> token id

def zh_iterator():
    # Iterate over the basic CJK unified ideographs, U+4E00..U+9FA5 inclusive
    # (the original range() end was exclusive and dropped U+9FA5).
    for idx in range(ord(u'\u4e00'), ord(u'\u9fa5') + 1):
        yield chr(idx)
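
# A minimal sketch (not in the original script): check how much of the basic
# CJK range is covered by single tokens, i.e. how many single characters
# encode to exactly one token id. Assumes the transformers tokenizer loaded
# above; call it manually if needed.
def zh_single_char_coverage():
    total = ord(u'\u9fa5') - ord(u'\u4e00') + 1
    covered = sum(
        1 for ch in zh_iterator()
        if len(tokenizer.encode(ch, add_special_tokens=False)) == 1
    )
    print("single-token CJK chars: %d / %d" % (covered, total))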


def test_coding_length(words, word_filter=None):
    # Measure how many token ids the tokenizer spends on each single-character
    # word. (Renamed the parameters: `vocab` shadowed the module-level dict and
    # `filter` shadowed the builtin.)
    all_length = []
    for word in words:
        if len(word) > 1:
            continue
        if word_filter is not None and word_filter(word):
            continue
        # transformers' encode() returns a plain list of ids; the original
        # `tokens.ids` only works with the tokenizers-library Tokenizer.
        token_ids = tokenizer.encode(word, add_special_tokens=False)
        all_length.append(len(token_ids))
        # if len(token_ids) > 1:
        if len(token_ids) == 1:
            print(word, token_ids)

    print("Token length distribution:", Counter(all_length))
    print("Average token length:", sum(all_length) / len(all_length))


def has_zh_punc(text):
    # True if the text contains any Chinese punctuation mark (zhon's list).
    # (Renamed from has_zh_char: it tests punctuation, not characters.)
    return any(ch in zh_punc for ch in text)
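
# Illustrative examples (assumed behavior of zhon.hanzi.punctuation):
#   has_zh_punc("你好,世界")    -> True   (full-width comma U+FF0C)
#   has_zh_punc("hello, world")  -> False  (ASCII comma is not Chinese punctuation)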


def iter_vocab():
    # Scan every token id, decode it, and classify Chinese tokens vs. Chinese
    # punctuation; write the classified tokens to vocab.zh.txt.
    zh_token_count = {"total": 0, "zh_single_char": 0, "zh_multi_char": 0}
    zh_symbol_count = 0
    with open("vocab.zh.txt", "w", encoding="utf-8") as f_out:
        for idx in range(len(vocab)):
            decode_str = tokenizer.decode([idx])
            if has_chinese(decode_str):
                zh_token_count["total"] += 1
                if len(decode_str.strip()) > 1:
                    zh_token_count["zh_multi_char"] += 1
                else:
                    zh_token_count["zh_single_char"] += 1
                f_out.write("%d\t%s\tChinese token\n" % (idx, decode_str))
            elif has_zh_punc(decode_str):
                zh_symbol_count += 1
                f_out.write("%d\t%s\tChinese punctuation\n" % (idx, decode_str))

    print("Vocab size: %d. Chinese tokens: %s, Chinese punctuation tokens: %d"
          % (len(vocab), str(zh_token_count), zh_symbol_count))


if __name__ == "__main__":

    # test_coding_length(jd_vocab_tokens, filter=lambda k: not is_chinese(k))
    # test_coding_length(zh_punc)
    # test_coding_length(zh_iterator())
    iter_vocab()
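
# Per the run recorded in the module docstring (46,145-token vocabulary),
# iter_vocab() prints:
#   Vocab size: 46145. Chinese tokens: {'total': 25359, 'zh_single_char': 5089,
#   'zh_multi_char': 20270}, Chinese punctuation tokens: 266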