Upload folder using huggingface_hub
- README.md +21 -0
- added_tokens.json +5 -0
- config.json +85 -0
- merges.txt +0 -0
- model-00001-of-00003.safetensors +3 -0
- model-00002-of-00003.safetensors +3 -0
- model-00003-of-00003.safetensors +3 -0
- model.safetensors.index.json +0 -0
- quant_strategy.json +0 -0
- special_tokens_map.json +16 -0
- tokenizer.json +0 -0
- tokenizer_config.json +40 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,21 @@
+---
+license: apache-2.0
+tags:
+- mlx
+---
+
+# GreenBitAI/Qwen-1.5-32B-layer-mix-bpw-2.5-mlx
+This quantized low-bit model was converted to MLX format from [`GreenBitAI/Qwen-1.5-32B-layer-mix-bpw-2.5`](https://huggingface.co/GreenBitAI/Qwen-1.5-32B-layer-mix-bpw-2.5).
+Refer to the [original model card](https://huggingface.co/GreenBitAI/Qwen-1.5-32B-layer-mix-bpw-2.5) for more details on the model.
+## Use with mlx
+
+```bash
+pip install gbx-lm
+```
+
+```python
+from gbx_lm import load, generate
+
+model, tokenizer = load("GreenBitAI/Qwen-1.5-32B-layer-mix-bpw-2.5-mlx")
+response = generate(model, tokenizer, prompt="hello", verbose=True)
+```
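The README's example passes a raw string prompt, which bypasses the ChatML chat template this model expects (see tokenizer_config.json below). A minimal sketch of chat-style usage, assuming the tokenizer returned by gbx_lm's `load` exposes the standard Hugging Face `apply_chat_template` method:

```python
from gbx_lm import load, generate

model, tokenizer = load("GreenBitAI/Qwen-1.5-32B-layer-mix-bpw-2.5-mlx")

# Wrap the user turn in ChatML before generating (assumption: the returned
# tokenizer supports apply_chat_template, as Hugging Face tokenizers do).
messages = [{"role": "user", "content": "hello"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```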
added_tokens.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644
+}
config.json
ADDED
@@ -0,0 +1,85 @@
+{
+  "add_cross_attention": false,
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bad_words_ids": null,
+  "begin_suppress_tokens": null,
+  "bos_token_id": 151643,
+  "chunk_size_feed_forward": 0,
+  "cross_attention_hidden_size": null,
+  "decoder_start_token_id": null,
+  "diversity_penalty": 0.0,
+  "do_sample": false,
+  "early_stopping": false,
+  "encoder_no_repeat_ngram_size": 0,
+  "eos_token_id": 151643,
+  "exponential_decay_length_penalty": null,
+  "finetuning_task": null,
+  "forced_bos_token_id": null,
+  "forced_eos_token_id": null,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 27392,
+  "is_decoder": false,
+  "is_encoder_decoder": false,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1
+  },
+  "length_penalty": 1.0,
+  "max_length": 20,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 35,
+  "min_length": 0,
+  "model_type": "qwen2",
+  "no_repeat_ngram_size": 0,
+  "num_attention_heads": 40,
+  "num_beam_groups": 1,
+  "num_beams": 1,
+  "num_hidden_layers": 64,
+  "num_key_value_heads": 8,
+  "num_return_sequences": 1,
+  "output_attentions": false,
+  "output_hidden_states": false,
+  "output_scores": false,
+  "pad_token_id": null,
+  "prefix": null,
+  "problem_type": null,
+  "pruned_heads": {},
+  "quantization": {
+    "group_size": null,
+    "bits": null
+  },
+  "remove_invalid_values": false,
+  "repetition_penalty": 1.0,
+  "return_dict": true,
+  "return_dict_in_generate": false,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 1000000.0,
+  "sep_token_id": null,
+  "sliding_window": 32768,
+  "suppress_tokens": null,
+  "task_specific_params": null,
+  "temperature": 1.0,
+  "tf_legacy_loss": false,
+  "tie_encoder_decoder": false,
+  "tie_word_embeddings": false,
+  "tokenizer_class": null,
+  "top_k": 50,
+  "top_p": 1.0,
+  "torch_dtype": "float16",
+  "torchscript": false,
+  "transformers_version": "4.39.3",
+  "typical_p": 1.0,
+  "use_bfloat16": false,
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
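A few useful quantities follow directly from the values above. A short sketch deriving them from the shipped config.json:

```python
import json

# Read the config shipped with this repo.
with open("config.json") as f:
    cfg = json.load(f)

# Per-head dimension: 5120 hidden size / 40 attention heads = 128.
head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]

# Grouped-query attention: 40 query heads share 8 KV heads, i.e. 5 queries per KV head.
gqa_group = cfg["num_attention_heads"] // cfg["num_key_value_heads"]

print(f"{cfg['num_hidden_layers']} layers, head_dim={head_dim}, GQA group={gqa_group}")
```

Note that the top-level `quantization` entry carries null `group_size`/`bits`; the per-layer mix behind the 2.5 bits-per-weight average is presumably described in quant_strategy.json instead.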
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9076288711371b478f66b3d3a8348fb6947af24c1cca3f106b2492862adf5fe
+size 5356184032
model-00002-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a6a61d18f7d5dc01abc7537c423031c7683a124f4a13282bd5b2481b80ae119
+size 5357618849
model-00003-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92e207b80cdd0ef8b01d602cf83eb343950f64ed62d5745b799fa7a9290d00bb
+size 3594836770
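The three .safetensors entries above are Git LFS pointer files, not the weights themselves: each records the spec version, a sha256 object id, and the payload size (14,308,639,651 bytes in total across the three shards, about 14.3 GB). A minimal sketch of parsing one pointer, based only on the three-line key/value format shown:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its space-separated key/value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:f9076288711371b478f66b3d3a8348fb6947af24c1cca3f106b2492862adf5fe\n"
    "size 5356184032"
)

info = parse_lfs_pointer(pointer)
assert info["oid"].startswith("sha256:")
print(int(info["size"]))  # 5356184032 bytes for the first shard (~5.0 GiB)
```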
model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.
quant_strategy.json
ADDED
The diff for this file is too large to render. See raw diff.
special_tokens_map.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": null,
+  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 32768,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
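The `chat_template` above is a ChatML-style Jinja template: it prepends a default system turn when the first message is not a system message, wraps every turn in `<|im_start|>`/`<|im_end|>` markers, and appends the assistant header when `add_generation_prompt` is set. A small sketch rendering the template straight from the file with plain `jinja2`, to show the exact prompt string it produces (Transformers applies the same template via `apply_chat_template`):

```python
import json
from jinja2 import Template

# Load the template string shipped in tokenizer_config.json.
with open("tokenizer_config.json") as f:
    template = Template(json.load(f)["chat_template"])

prompt = template.render(
    messages=[{"role": "user", "content": "hello"}],
    add_generation_prompt=True,
)
print(prompt)
# <|im_start|>system
# You are a helpful assistant<|im_end|>
# <|im_start|>user
# hello<|im_end|>
# <|im_start|>assistant
```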
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.