Commit 4a31c07 by EricB (HF staff)
Parent: d517ef2

Upload model
.gitattributes CHANGED
@@ -37,3 +37,10 @@ mistral-nemo-instruct-2407-q4k.uqff filter=lfs diff=lfs merge=lfs -text
 mistral-nemo-instruct-2407-q5k.uqff filter=lfs diff=lfs merge=lfs -text
 mistral-nemo-instruct-2407-q6k.uqff filter=lfs diff=lfs merge=lfs -text
 mistral-nemo-instruct-2407-q8_0.uqff filter=lfs diff=lfs merge=lfs -text
+mistral-nemo-2407-instruct-f8e4m3.uqff filter=lfs diff=lfs merge=lfs -text
+mistral-nemo-2407-instruct-hqq4.uqff filter=lfs diff=lfs merge=lfs -text
+mistral-nemo-2407-instruct-hqq8.uqff filter=lfs diff=lfs merge=lfs -text
+mistral-nemo-2407-instruct-q3k.uqff filter=lfs diff=lfs merge=lfs -text
+mistral-nemo-2407-instruct-q4k.uqff filter=lfs diff=lfs merge=lfs -text
+mistral-nemo-2407-instruct-q5k.uqff filter=lfs diff=lfs merge=lfs -text
+mistral-nemo-2407-instruct-q8_0.uqff filter=lfs diff=lfs merge=lfs -text
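Each added line is a Git LFS rule of the form `<pattern> filter=lfs diff=lfs merge=lfs -text`, the kind of entry `git lfs track` appends so the large UQFF weights are stored as LFS objects rather than in-tree blobs. A minimal sketch of reading these rules back out (stdlib Python only; the helper name is made up for illustration):

# Sketch: list which patterns in .gitattributes route files through Git LFS.
# A rule looks like: "<pattern> filter=lfs diff=lfs merge=lfs -text".
from pathlib import Path

def lfs_patterns(attributes_file: str = ".gitattributes") -> list[str]:
    patterns = []
    for line in Path(attributes_file).read_text().splitlines():
        fields = line.split()
        # the first field is the path pattern; the rest are attributes
        if len(fields) > 1 and "filter=lfs" in fields[1:]:
            patterns.append(fields[0])
    return patterns

print(lfs_patterns())  # ['mistral-nemo-instruct-2407-q4k.uqff', ...]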
config.json CHANGED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 1024000,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 40,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.43.0.dev0",
+  "use_cache": true,
+  "vocab_size": 131072
+}
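These fields fully determine the network's shape, so the checkpoint's approximate parameter count follows from arithmetic alone. A back-of-the-envelope sketch, assuming the standard Mistral decoder layout (q/k/v/o attention projections plus a gated SiLU MLP, untied embeddings per `tie_word_embeddings: false`):

# Back-of-the-envelope parameter count from config.json.
# Assumes the standard Mistral decoder layout; the RMSNorm weights are
# negligible but included for completeness.
import json

with open("config.json") as f:
    cfg = json.load(f)

h      = cfg["hidden_size"]            # 5120
inter  = cfg["intermediate_size"]      # 14336
layers = cfg["num_hidden_layers"]      # 40
heads  = cfg["num_attention_heads"]    # 32
kv     = cfg["num_key_value_heads"]    # 8
hd     = cfg["head_dim"]               # 128
vocab  = cfg["vocab_size"]             # 131072

attn = h * heads * hd + 2 * h * kv * hd + heads * hd * h   # q, k+v, o
mlp  = 3 * h * inter                                       # gate, up, down
per_layer = attn + mlp + 2 * h                             # + 2 RMSNorms
embed = vocab * h * (1 if cfg["tie_word_embeddings"] else 2)
total = layers * per_layer + embed + h                     # + final norm

print(f"~{total / 1e9:.2f}B parameters")   # ~12.25B -> Mistral NeMo 12B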
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.43.0.dev0"
+}
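generation_config.json only pins the special token ids and their provenance (`_from_model_config`). A minimal sketch of how a runtime might consume the two ids; `step` is a hypothetical stand-in for the model's next-token function, not a real API:

# Illustrative only; real loaders live in transformers / mistral.rs.
import json

with open("generation_config.json") as f:
    gen_cfg = json.load(f)

def generate(step, prompt_ids, max_new_tokens=32):
    """step(ids) -> next token id; a placeholder for the actual model."""
    ids = [gen_cfg["bos_token_id"], *prompt_ids]  # prepend BOS
    for _ in range(max_new_tokens):
        nxt = step(ids)
        if nxt == gen_cfg["eos_token_id"]:        # stop on EOS
            break
        ids.append(nxt)
    return ids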
mistral-nemo-2407-instruct-f8e4m3.uqff ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:856a84a348dd83026615e2633d2b1a47a6fdea8756464175b3964206b76b2c46
+size 11576312914
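What the repository actually stores for each weight file is this three-line Git LFS pointer (version, oid, size); the 10.8 GiB blob itself lives in LFS storage. A small sketch of parsing the pointer format, which only works on the raw pointer (e.g. a clone made with GIT_LFS_SKIP_SMUDGE=1), since after checkout the path holds the real binary:

# Parse a Git LFS pointer: three "key value" lines (version / oid / size).
def read_lfs_pointer(path: str) -> dict:
    pointer = {}
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            key, _, value = line.strip().partition(" ")
            pointer[key] = value
    pointer["size"] = int(pointer["size"])
    return pointer

p = read_lfs_pointer("mistral-nemo-2407-instruct-f8e4m3.uqff")
print(p["oid"], f"{p['size'] / 2**30:.2f} GiB")  # sha256:856a... 10.78 GiB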
mistral-nemo-2407-instruct-hqq4.uqff ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96b2b52b9d8965cc7178c663ea45e8444ce5403a08cb52874ab0e0bc40264fe3
+size 7235221650
mistral-nemo-2407-instruct-hqq8.uqff ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46a843bbbd90714f3aad5eb3102d4037d3c38e13fe901f6fde252c077a8b3523
+size 13023361338
mistral-nemo-2407-instruct-q3k.uqff ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:899649e508dd1034367c25b1683b574534ec28615afff22738903b234b012daf
+size 4974211554
mistral-nemo-2407-instruct-q4k.uqff ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc03842c5c77fdc81c17272ae5f233426f3ee92096310db82b7e02b3e1835c01
+size 6511686218
mistral-nemo-2407-instruct-q5k.uqff ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ce79daf687c46d46eac8ec9bb54cfc5cf91e27390808b8530e79a41acc5fe0b
+size 7958721114
mistral-nemo-2407-instruct-q8_0.uqff ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c817b9add723d1d2f186c6511d24395829b7aadcf9bc54c01f693a2f26f7450
+size 12299825890
residual.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d99c1a76d5da61c16b9746cf50472c42d59137291cd7f5f9acab9a5e3e1e8e7
+size 1343016224
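Summing the pointer sizes gives the download footprint per quantization. The sketch below hardcodes the `size` fields from the pointers above; it assumes (a hedge, not verified here) that a UQFF file is loaded together with residual.safetensors, which would hold the checkpoint's non-quantized tensors:

# Approximate download per quantization, from the LFS pointer sizes above.
SIZES = {                      # bytes, copied from each "+size" line
    "q3k":     4_974_211_554,
    "q4k":     6_511_686_218,
    "hqq4":    7_235_221_650,
    "q5k":     7_958_721_114,
    "f8e4m3": 11_576_312_914,
    "q8_0":   12_299_825_890,
    "hqq8":   13_023_361_338,
}
RESIDUAL = 1_343_016_224       # residual.safetensors

for name, size in SIZES.items():
    print(f"{name:>7}: {(size + RESIDUAL) / 2**30:6.2f} GiB with residual")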
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff.