Update mlc-chat-config.json
mlc-chat-config.json CHANGED (+30, -7)
@@ -1,22 +1,45 @@
 {
-    "model_lib": "mistral
-    "local_id": "mistral
+    "model_lib": "mistral",
+    "local_id": "mistral",
     "conv_template": "mistral_default",
     "temperature": 0.7,
     "repetition_penalty": 1.0,
+
+    "conv_config": {
+        "seps": [
+            " "
+        ],
+        "stop_tokens": [
+            2
+        ],
+        "offset": 0,
+        "messages": [
+
+
+        ],
+        "stop_str": "</s>",
+        "roles": [
+            "[INST]",
+            "[/INST]"
+        ],
+        "role_msg_sep": ": ",
+        "role_empty_sep": ": ",
+        "system": "<s>[INST] A chat between a curious user and an artificial intelligence research data management assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. [/INST]",
+        "add_bos": true,
+        "name": "Llama2"
+    },
     "top_p": 0.95,
     "mean_gen_len": 128,
-    "max_gen_len":
+    "max_gen_len": 512,
     "num_shards": 1,
-    "use_presharded_weights": false,
     "shift_fill_factor": 0.3,
     "tokenizer_files": [
         "tokenizer.json",
         "tokenizer.model"
     ],
     "model_category": "mistral",
-    "model_name": "
+    "model_name": "Mistral-7B-Instruct-v0.1",
     "vocab_size": 32000,
-    "
-    "
+    "sliding_window": 4096,
+    "prefill_chunk_size": 4096
 }
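The added conv_config block overrides the stock mistral_default conversation template at load time: it keeps Llama-2-style [INST] … [/INST] roles (hence "name": "Llama2"), swaps in a research-data-management system prompt, and stops on token id 2 (</s>), while the new max_gen_len, sliding_window, and prefill_chunk_size fields cap generation at 512 tokens over a 4096-token sliding attention window. A minimal sketch of exercising the same settings through the mlc_chat Python API follows; the model path and the prompt are placeholders, not part of this commit, and exact dataclass fields may vary across mlc_chat versions:

    # sketch, assuming the mlc_chat wheel that matches this model build
    from mlc_chat import ChatModule, ChatConfig, ConvConfig
    from mlc_chat.callback import StreamToStdout

    # Mirror the conv_config committed above; ChatModule also reads it
    # directly from the mlc-chat-config.json shipped next to the weights.
    conv = ConvConfig(
        system=("<s>[INST] A chat between a curious user and an artificial "
                "intelligence research data management assistant. The assistant "
                "gives helpful, detailed, and polite answers to the user's "
                "questions. [/INST]"),
        roles=["[INST]", "[/INST]"],
        seps=[" "],
        stop_str="</s>",
        stop_tokens=[2],  # 2 == </s> in the 32000-entry Mistral vocab
        add_bos=True,
    )

    cm = ChatModule(
        model="dist/Mistral-7B-Instruct-v0.1-q4f16_1",  # placeholder path
        chat_config=ChatConfig(conv_config=conv, temperature=0.7,
                               top_p=0.95, max_gen_len=512),
    )
    cm.generate(prompt="How should I document a new dataset?",
                progress_callback=StreamToStdout(callback_interval=2))

Because this repository already ships these values in mlc-chat-config.json, the explicit ConvConfig above is redundant in practice; it is shown only to make the runtime effect of the diff explicit.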