---
license: unknown
---
Llama.cpp GGUF of c4ai-command-r-v01 with the `command-r` pre-tokenizer fixed.
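
The fix is the `tokenizer.ggml.pre` metadata key (set to `command-r`, visible as kv 13 in the log below), which tells llama.cpp builds with BPE pre-tokenizer support which pre-tokenization regexes to apply instead of falling back to the default. A minimal sketch for checking the key on a downloaded file, assuming the `gguf` Python package from llama.cpp's `gguf-py` and a placeholder file name:

```python
from gguf import GGUFReader

# Placeholder path: point this at the downloaded GGUF.
reader = GGUFReader("c4ai-command-r-v01-Q5_K_M.gguf")

field = reader.fields.get("tokenizer.ggml.pre")
if field is None:
    # Files converted before the fix simply lack the key.
    print("tokenizer.ggml.pre is missing")
else:
    # For string-typed KV fields, the value bytes sit in the part indexed by data[-1].
    print("pre-tokenizer:", bytes(field.parts[field.data[-1]]).decode("utf-8"))
```

If the key is missing, recent llama.cpp builds warn about an unknown pre-tokenizer type and fall back to the default, which degrades tokenization for this model.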

Log from the Q5_K_M quantization run:

```
main: build = 2789 (84250014)
main: built with gcc (Ubuntu 13.2.0-4ubuntu3) 13.2.0 for x86_64-linux-gnu
main: quantizing '/gguf/c4ai-commandr-v01_a.gguf' to '/gguf/c4ai-command-r-v01-Q5_K_M.gguf' as Q5_K_M
llama_model_loader: loaded meta data with 26 key-value pairs and 322 tensors from /mnt/Orlando/gguf/c4ai-commandr-v01_a.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = command-r
llama_model_loader: - kv 1: command-r.block_count u32 = 40
llama_model_loader: - kv 2: command-r.context_length u32 = 131072
llama_model_loader: - kv 3: command-r.embedding_length u32 = 8192
llama_model_loader: - kv 4: command-r.feed_forward_length u32 = 22528
llama_model_loader: - kv 5: command-r.attention.head_count u32 = 64
llama_model_loader: - kv 6: command-r.attention.head_count_kv u32 = 64
llama_model_loader: - kv 7: command-r.rope.freq_base f32 = 8000000.000000
llama_model_loader: - kv 8: command-r.attention.layer_norm_epsilon f32 = 0.000010
llama_model_loader: - kv 9: general.file_type u32 = 1
llama_model_loader: - kv 10: command-r.logit_scale f32 = 0.062500
llama_model_loader: - kv 11: command-r.rope.scaling.type str = none
llama_model_loader: - kv 12: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 13: tokenizer.ggml.pre str = command-r
llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = ["<PAD>", "<UNK>", "<CLS>", "<SEP>", ...
llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...
llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,253333] = ["Ġ Ġ", "Ġ t", "e r", "i n", "Ġ a...
llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 5
llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 255001
llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 22: tokenizer.chat_template.tool_use str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 23: tokenizer.chat_template.rag str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 24: tokenizer.chat_templates arr[str,2] = ["rag", "tool_use"]
llama_model_loader: - kv 25: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
```
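
As a quick smoke test, the quantized file can be loaded through the llama-cpp-python bindings (an assumption; the plain llama.cpp CLI from build 2789 or later works just as well). The model path below is a placeholder for wherever the Q5_K_M file is saved:

```python
from llama_cpp import Llama

# Placeholder path to the downloaded Q5_K_M file.
llm = Llama(model_path="c4ai-command-r-v01-Q5_K_M.gguf", n_ctx=4096)

# With tokenizer.ggml.pre = "command-r", the BPE splits (and therefore the
# token IDs) should match the original Cohere tokenizer.
print(llm.tokenize("Hello, Command-R!".encode("utf-8")))

# Short completion as a sanity check.
out = llm("Write one sentence about GGUF quantization.", max_tokens=48)
print(out["choices"][0]["text"])
```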