mantafloppy committed
Commit 51ff810 • Parent(s): 23dc31f
Upload folder using huggingface_hub
Files changed:
- .gitattributes +1 -0
- README.md +0 -3
- whiterabbitneo-33b-v1-q4_k.gguf +3 -0
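Per the commit message, the files listed above were pushed with huggingface_hub's folder upload. A minimal sketch of such a call, with the target repo_id as an assumption (it is not stated in this diff), might look like:

```python
# Hedged sketch of the kind of upload named in the commit message
# ("Upload folder using huggingface_hub"). The repo_id is an assumption.
from huggingface_hub import upload_folder

upload_folder(
    folder_path=".",                              # local folder holding the GGUF, README, .gitattributes
    repo_id="mantafloppy/whiterabbitneo-33b-v1",  # assumption: target model repo
    repo_type="model",
)
```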
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.gguf filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -19,9 +19,7 @@ snapshot_download(repo_id=model_id, local_dir="whiterabbitneo-hf",
 ```
 
 brew install gh
-
 gh auth login
-
 gh pr checkout 3633
 
 python3 llama.cpp/convert.py whiterabbitneo-hf --outfile whiterabbitneo-33b-v1-q8_0.gguf --outtype q8_0 --padvocab
@@ -29,7 +27,6 @@ python3 llama.cpp/convert.py whiterabbitneo-hf --outfile whiterabbitneo-33b-v1-q
 
 
 python3 llama.cpp/convert.py whiterabbitneo-hf --outfile whiterabbitneo-f16.gguf --outtype f16 --padvocab
-
 llama.cpp/quantize whiterabbitneo-f16.gguf whiterabbitneo-q4_k.gguf q4_k
 
 ```
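For context, the first hunk header above shows only the start of the README's snapshot_download call. A minimal sketch of that download step, with the model_id value and any remaining arguments as assumptions, could be:

```python
# Hedged sketch: fetch the source checkpoint into whiterabbitneo-hf before running convert.py.
# model_id is an assumption; the real value is defined earlier in the README.
from huggingface_hub import snapshot_download

model_id = "WhiteRabbitNeo/WhiteRabbitNeo-33B-v1"  # assumption
snapshot_download(repo_id=model_id, local_dir="whiterabbitneo-hf")
```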
whiterabbitneo-33b-v1-q4_k.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b78315b5ab07dc687bc5650089ee224aa640468b39f2e5728000495a6061d874
+size 19940659296
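The three added lines are a Git LFS pointer: the repository stores only the sha256 oid and the byte size (about 19.9 GB), not the model weights themselves. A minimal sketch for checking that a locally downloaded copy matches this pointer, assuming the file sits in the current directory, might be:

```python
# Hedged sketch: verify a local GGUF against the LFS pointer committed above.
import hashlib
import os

expected_oid = "b78315b5ab07dc687bc5650089ee224aa640468b39f2e5728000495a6061d874"
expected_size = 19940659296  # bytes, roughly 19.9 GB

path = "whiterabbitneo-33b-v1-q4_k.gguf"  # assumption: local path to the downloaded file
assert os.path.getsize(path) == expected_size, "size does not match the LFS pointer"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("local GGUF matches the committed LFS pointer")
```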