mengshyu commited on
Commit
7cf327e
1 Parent(s): bca36cf

Upload folder using huggingface_hub

Browse files
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
mlc-chat-config.json ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "0.1.0",
3
+ "model_type": "qwen2",
4
+ "quantization": "q4f16_0",
5
+ "model_config": {
6
+ "hidden_act": "silu",
7
+ "hidden_size": 896,
8
+ "intermediate_size": 4864,
9
+ "num_attention_heads": 14,
10
+ "num_hidden_layers": 24,
11
+ "num_key_value_heads": 2,
12
+ "rms_norm_eps": 1e-06,
13
+ "rope_theta": 1000000.0,
14
+ "vocab_size": 151936,
15
+ "tie_word_embeddings": true,
16
+ "context_window_size": 32768,
17
+ "prefill_chunk_size": 2048,
18
+ "tensor_parallel_shards": 1,
19
+ "head_dim": 64,
20
+ "dtype": "float32",
21
+ "max_batch_size": 80
22
+ },
23
+ "vocab_size": 151936,
24
+ "context_window_size": 32768,
25
+ "sliding_window_size": -1,
26
+ "prefill_chunk_size": 2048,
27
+ "attention_sink_size": -1,
28
+ "tensor_parallel_shards": 1,
29
+ "pipeline_parallel_stages": 1,
30
+ "temperature": 0.7,
31
+ "presence_penalty": 0.0,
32
+ "frequency_penalty": 0.0,
33
+ "repetition_penalty": 1.1,
34
+ "top_p": 0.8,
35
+ "tokenizer_files": [
36
+ "tokenizer.json",
37
+ "vocab.json",
38
+ "merges.txt",
39
+ "tokenizer_config.json"
40
+ ],
41
+ "tokenizer_info": {
42
+ "token_postproc_method": "byte_level",
43
+ "prepend_space_in_encode": false,
44
+ "strip_space_in_decode": false
45
+ },
46
+ "conv_template": {
47
+ "name": "qwen2",
48
+ "system_template": "<|im_start|>system\n{system_message}<|im_end|>\n",
49
+ "system_message": "You are a helpful assistant.",
50
+ "system_prefix_token_ids": null,
51
+ "add_role_after_system_message": true,
52
+ "roles": {
53
+ "user": "<|im_start|>user",
54
+ "assistant": "<|im_start|>assistant"
55
+ },
56
+ "role_templates": {
57
+ "user": "{user_message}",
58
+ "assistant": "{assistant_message}",
59
+ "tool": "{tool_message}"
60
+ },
61
+ "messages": [],
62
+ "seps": [
63
+ "<|im_end|>\n"
64
+ ],
65
+ "role_content_sep": "\n",
66
+ "role_empty_sep": "\n",
67
+ "stop_str": [
68
+ "<|endoftext|>",
+ "<|im_end|>"
69
+ ],
70
+ "stop_token_ids": [
71
+ 151643,
72
+ 151645
73
+ ],
74
+ "function_string": "",
75
+ "use_function_calling": false
76
+ },
77
+ "pad_token_id": 151643,
78
+ "bos_token_id": 151643,
79
+ "eos_token_id": [
80
+ 151645,
81
+ 151643
82
+ ]
83
+ }
ndarray-cache.json ADDED
The diff for this file is too large to render. See raw diff
 
params_shard_0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bd4b998cfc27d72db1f2f0bc1dcb6af547de4e7ac6f0db0f1977ddd33637f1f
3
+ size 68067328
params_shard_1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53e2b7fbe76a608552f4c946e49388aed0087a0de867382944552aa97966cacf
3
+ size 33234176
params_shard_2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd83515c7a7cb9acfe9adaa66af64bc778449a201c6e769b07ca35647b93fb57
3
+ size 33505280
params_shard_3.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cdd8758f160643a9009c3dd27f90b91559e29dd19305c2c12826c37957f13bcf
3
+ size 33053696
params_shard_4.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bb14d261ec702e5fc74e24ac02059b55fcd7404f5bf288a6f7c67e95da9e314
3
+ size 33020928
params_shard_5.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4159ae42a0cb2da6dc31cc586fccdda77989dac3d3afe345719100f6416239b3
3
+ size 29211648
params_shard_6.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c0a650e55b6e9d351a2e4760f9cb14059db0397588a3796b484cadfea7fc5c5
3
+ size 33297408
params_shard_7.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0e5618d3a1acf6c1305c3448b0a774c7bde35c40655c429b1c7297763136eeb
3
+ size 14605824
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": ["<|im_start|>", "<|im_end|>"],
30
+ "bos_token": null,
31
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "<|im_end|>",
34
+ "errors": "replace",
35
+ "model_max_length": 32768,
36
+ "pad_token": "<|endoftext|>",
37
+ "split_special_tokens": false,
38
+ "tokenizer_class": "Qwen2Tokenizer",
39
+ "unk_token": null
40
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff