morriszms commited on
Commit
dd420f4
1 Parent(s): 4ea405c

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ PowerMoE-3b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ PowerMoE-3b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ PowerMoE-3b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ PowerMoE-3b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ PowerMoE-3b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ PowerMoE-3b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ PowerMoE-3b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ PowerMoE-3b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ PowerMoE-3b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ PowerMoE-3b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ PowerMoE-3b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ PowerMoE-3b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
PowerMoE-3b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d07cdbc57576d9df8e35de08edc2d40df8b832692d448ae265c7f518ce8d1ea3
3
+ size 1266132960
PowerMoE-3b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3430a2618d4479fa3526e397ad315252c1ee2ad2aef10acabbae2d9c2a4039b
3
+ size 1773971424
PowerMoE-3b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9fd1ba5462d301254ae3e36b5e497b6ed36dd0feba3ade610ac611c9e1c6385
3
+ size 1643620320
PowerMoE-3b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb4e8963d24f9699896f3e49a7c1bc900c009290ac600ac5f963c1c13e3806a6
3
+ size 1488496608
PowerMoE-3b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24ca286eb77ad8e26c2de2981ff29c82a40f34a5596c68a7ba98c061fab082ff
3
+ size 1926342624
PowerMoE-3b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5036f68e4a34774a698dc36dd51b7677ae6a70486b0c24f7da06a0ed86d95714
3
+ size 2059347936
PowerMoE-3b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d0b900f3ed43ac8ca38d576b55c6d055ccb3129853457cea0b26fddffe130e7
3
+ size 1942464480
PowerMoE-3b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f61962944a7f251e9150c1fe270763fa7fbaa5fcd8f1ed5753760671e408e3d
3
+ size 2338432992
PowerMoE-3b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f49b9b85b8ec15ff2ada35d840e93a389236ad9537d9f4dc6eeacdd4c0d26faa
3
+ size 2406950880
PowerMoE-3b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf360ac529721c59393a57ca3845a7ec25e1101fcfde5992595e07e6a00e2141
3
+ size 2338432992
PowerMoE-3b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8dae3975106e98bf4b14595a4312150ca43f0c94a3a5f98a38fed691c4e96bb3
3
+ size 2776279008
PowerMoE-3b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63ac5e14508dff4d7601d8130f0d5c5c508d694b812805441f7bd4e46e8a1a76
3
+ size 3592988640
README.md ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pipeline_tag: text-generation
3
+ inference: false
4
+ license: apache-2.0
5
+ library_name: transformers
6
+ tags:
7
+ - TensorBlock
8
+ - GGUF
9
+ base_model: ibm/PowerMoE-3b
10
+ model-index:
11
+ - name: ibm/PowerMoE-3b
12
+ results:
13
+ - task:
14
+ type: text-generation
15
+ dataset:
16
+ name: ARC
17
+ type: lm-eval-harness
18
+ metrics:
19
+ - type: accuracy-norm
20
+ value: 58.1
21
+ name: accuracy-norm
22
+ verified: false
23
+ - type: accuracy
24
+ value: 65.0
25
+ name: accuracy
26
+ verified: false
27
+ - type: accuracy-norm
28
+ value: 71.5
29
+ name: accuracy-norm
30
+ verified: false
31
+ - type: accuracy-norm
32
+ value: 41.0
33
+ name: accuracy-norm
34
+ verified: false
35
+ - type: accuracy-norm
36
+ value: 79.1
37
+ name: accuracy-norm
38
+ verified: false
39
+ - type: accuracy-norm
40
+ value: 65.0
41
+ name: accuracy-norm
42
+ verified: false
43
+ - type: accuracy
44
+ value: 42.8
45
+ name: accuracy
46
+ verified: false
47
+ - type: accuracy
48
+ value: 25.9
49
+ name: accuracy
50
+ verified: false
51
+ - type: accuracy
52
+ value: 14.8
53
+ name: accuracy
54
+ verified: false
55
+ - task:
56
+ type: text-generation
57
+ dataset:
58
+ name: humaneval
59
+ type: bigcode-eval
60
+ metrics:
61
+ - type: pass@1
62
+ value: 20.1
63
+ name: pass@1
64
+ verified: false
65
+ - type: pass@1
66
+ value: 32.4
67
+ name: pass@1
68
+ verified: false
69
+ ---
70
+
71
+ <div style="width: auto; margin-left: auto; margin-right: auto">
72
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
73
+ </div>
74
+ <div style="display: flex; justify-content: space-between; width: 100%;">
75
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
76
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
77
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
78
+ </p>
79
+ </div>
80
+ </div>
81
+
82
+ ## ibm/PowerMoE-3b - GGUF
83
+
84
+ This repo contains GGUF format model files for [ibm/PowerMoE-3b](https://huggingface.co/ibm/PowerMoE-3b).
85
+
86
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
87
+
88
+ ## Prompt template
89
+
90
+ ```
91
+
92
+ ```
93
+
94
+ ## Model file specification
95
+
96
+ | Filename | Quant type | File Size | Description |
97
+ | -------- | ---------- | --------- | ----------- |
98
+ | [PowerMoE-3b-Q2_K.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q2_K.gguf) | Q2_K | 1.179 GB | smallest, significant quality loss - not recommended for most purposes |
99
+ | [PowerMoE-3b-Q3_K_S.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q3_K_S.gguf) | Q3_K_S | 1.386 GB | very small, high quality loss |
100
+ | [PowerMoE-3b-Q3_K_M.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q3_K_M.gguf) | Q3_K_M | 1.531 GB | very small, high quality loss |
101
+ | [PowerMoE-3b-Q3_K_L.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q3_K_L.gguf) | Q3_K_L | 1.652 GB | small, substantial quality loss |
102
+ | [PowerMoE-3b-Q4_0.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q4_0.gguf) | Q4_0 | 1.794 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
103
+ | [PowerMoE-3b-Q4_K_S.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q4_K_S.gguf) | Q4_K_S | 1.809 GB | small, greater quality loss |
104
+ | [PowerMoE-3b-Q4_K_M.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q4_K_M.gguf) | Q4_K_M | 1.918 GB | medium, balanced quality - recommended |
105
+ | [PowerMoE-3b-Q5_0.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q5_0.gguf) | Q5_0 | 2.178 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
106
+ | [PowerMoE-3b-Q5_K_S.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q5_K_S.gguf) | Q5_K_S | 2.178 GB | large, low quality loss - recommended |
107
+ | [PowerMoE-3b-Q5_K_M.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q5_K_M.gguf) | Q5_K_M | 2.242 GB | large, very low quality loss - recommended |
108
+ | [PowerMoE-3b-Q6_K.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q6_K.gguf) | Q6_K | 2.586 GB | very large, extremely low quality loss |
109
+ | [PowerMoE-3b-Q8_0.gguf](https://huggingface.co/tensorblock/PowerMoE-3b-GGUF/tree/main/PowerMoE-3b-Q8_0.gguf) | Q8_0 | 3.346 GB | very large, extremely low quality loss - not recommended |
110
+
111
+
112
+ ## Downloading instruction
113
+
114
+ ### Command line
115
+
116
+ First, install the Hugging Face CLI:
117
+
118
+ ```shell
119
+ pip install -U "huggingface_hub[cli]"
120
+ ```
121
+
122
+ Then, download the individual model file to a local directory:
123
+
124
+ ```shell
125
+ huggingface-cli download tensorblock/PowerMoE-3b-GGUF --include "PowerMoE-3b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
126
+ ```
127
+
128
+ If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:
129
+
130
+ ```shell
131
+ huggingface-cli download tensorblock/PowerMoE-3b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
132
+ ```