|
llama_model_loader: loaded meta data with 34 key-value pairs and 464 tensors from shieldgemma-9b-IMat-GGUF/shieldgemma-9b.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest)) |
|
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. |
|
llama_model_loader: - kv 0: general.architecture str = gemma2 |
|
llama_model_loader: - kv 1: general.type str = model |
|
llama_model_loader: - kv 2: general.name str = Shieldgemma 9b |
|
llama_model_loader: - kv 3: general.basename str = shieldgemma |
|
llama_model_loader: - kv 4: general.size_label str = 9B |
|
llama_model_loader: - kv 5: general.license str = gemma |
|
llama_model_loader: - kv 6: general.tags arr[str,1] = ["text-generation"] |
|
llama_model_loader: - kv 7: gemma2.context_length u32 = 8192 |
|
llama_model_loader: - kv 8: gemma2.embedding_length u32 = 3584 |
|
llama_model_loader: - kv 9: gemma2.block_count u32 = 42 |
|
llama_model_loader: - kv 10: gemma2.feed_forward_length u32 = 14336 |
|
llama_model_loader: - kv 11: gemma2.attention.head_count u32 = 16 |
|
llama_model_loader: - kv 12: gemma2.attention.head_count_kv u32 = 8 |
|
llama_model_loader: - kv 13: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001 |
|
llama_model_loader: - kv 14: gemma2.attention.key_length u32 = 256 |
|
llama_model_loader: - kv 15: gemma2.attention.value_length u32 = 256 |
|
llama_model_loader: - kv 16: general.file_type u32 = 7 |
|
llama_model_loader: - kv 17: gemma2.attn_logit_softcapping f32 = 50.000000 |
|
llama_model_loader: - kv 18: gemma2.final_logit_softcapping f32 = 30.000000 |
|
llama_model_loader: - kv 19: gemma2.attention.sliding_window u32 = 4096 |
|
llama_model_loader: - kv 20: tokenizer.ggml.model str = llama |
|
llama_model_loader: - kv 21: tokenizer.ggml.pre str = default |
|
llama_model_loader: - kv 22: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ... |
|
llama_model_loader: - kv 23: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00... |
|
llama_model_loader: - kv 24: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ... |
|
llama_model_loader: - kv 25: tokenizer.ggml.bos_token_id u32 = 2 |
|
llama_model_loader: - kv 26: tokenizer.ggml.eos_token_id u32 = 1 |
|
llama_model_loader: - kv 27: tokenizer.ggml.unknown_token_id u32 = 3 |
|
llama_model_loader: - kv 28: tokenizer.ggml.padding_token_id u32 = 0 |
|
llama_model_loader: - kv 29: tokenizer.ggml.add_bos_token bool = true |
|
llama_model_loader: - kv 30: tokenizer.ggml.add_eos_token bool = false |
|
llama_model_loader: - kv 31: tokenizer.chat_template str = {{- bos_token }}\n{%- if messages[-1].... |
|
llama_model_loader: - kv 32: tokenizer.ggml.add_space_prefix bool = false |
|
llama_model_loader: - kv 33: general.quantization_version u32 = 2 |
|
llama_model_loader: - type f32: 169 tensors |
|
llama_model_loader: - type q8_0: 295 tensors |
|
llm_load_vocab: special tokens cache size = 249 |
|
llm_load_vocab: token to piece cache size = 1.6014 MB |
|
llm_load_print_meta: format = GGUF V3 (latest) |
|
llm_load_print_meta: arch = gemma2 |
|
llm_load_print_meta: vocab type = SPM |
|
llm_load_print_meta: n_vocab = 256000 |
|
llm_load_print_meta: n_merges = 0 |
|
llm_load_print_meta: vocab_only = 0 |
|
llm_load_print_meta: n_ctx_train = 8192 |
|
llm_load_print_meta: n_embd = 3584 |
|
llm_load_print_meta: n_layer = 42 |
|
llm_load_print_meta: n_head = 16 |
|
llm_load_print_meta: n_head_kv = 8 |
|
llm_load_print_meta: n_rot = 256 |
|
llm_load_print_meta: n_swa = 4096 |
|
llm_load_print_meta: n_embd_head_k = 256 |
|
llm_load_print_meta: n_embd_head_v = 256 |
|
llm_load_print_meta: n_gqa = 2 |
|
llm_load_print_meta: n_embd_k_gqa = 2048 |
|
llm_load_print_meta: n_embd_v_gqa = 2048 |
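
As a quick cross-check (an explanatory sketch added here, not part of the original log), the grouped-query-attention figures above follow directly from the head counts and head sizes reported earlier:

# Derivation of the GQA-related sizes printed above (values copied from this log).
n_head, n_head_kv = 16, 8                    # gemma2.attention.head_count / head_count_kv
n_embd_head_k = n_embd_head_v = 256          # gemma2.attention.key_length / value_length
n_gqa = n_head // n_head_kv                  # -> 2, matches n_gqa
n_embd_k_gqa = n_head_kv * n_embd_head_k     # -> 2048, matches n_embd_k_gqa
n_embd_v_gqa = n_head_kv * n_embd_head_v     # -> 2048, matches n_embd_v_gqa
print(n_gqa, n_embd_k_gqa, n_embd_v_gqa)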
|
llm_load_print_meta: f_norm_eps = 0.0e+00 |
|
llm_load_print_meta: f_norm_rms_eps = 1.0e-06 |
|
llm_load_print_meta: f_clamp_kqv = 0.0e+00 |
|
llm_load_print_meta: f_max_alibi_bias = 0.0e+00 |
|
llm_load_print_meta: f_logit_scale = 0.0e+00 |
|
llm_load_print_meta: n_ff = 14336 |
|
llm_load_print_meta: n_expert = 0 |
|
llm_load_print_meta: n_expert_used = 0 |
|
llm_load_print_meta: causal attn = 1 |
|
llm_load_print_meta: pooling type = 0 |
|
llm_load_print_meta: rope type = 2 |
|
llm_load_print_meta: rope scaling = linear |
|
llm_load_print_meta: freq_base_train = 10000.0 |
|
llm_load_print_meta: freq_scale_train = 1 |
|
llm_load_print_meta: n_ctx_orig_yarn = 8192 |
|
llm_load_print_meta: rope_finetuned = unknown |
|
llm_load_print_meta: ssm_d_conv = 0 |
|
llm_load_print_meta: ssm_d_inner = 0 |
|
llm_load_print_meta: ssm_d_state = 0 |
|
llm_load_print_meta: ssm_dt_rank = 0 |
|
llm_load_print_meta: model type = 9B |
|
llm_load_print_meta: model ftype = Q8_0 |
|
llm_load_print_meta: model params = 9.24 B |
|
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW) |
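
The bits-per-weight figure is the reported file size divided by the parameter count; a rough check (an added note, assuming GiB means 1024^3 bytes):

# Rough check of the reported 8.50 BPW, using the rounded figures from this log.
size_bits = 9.15 * 1024**3 * 8     # model size: 9.15 GiB
params = 9.24e9                    # model params: 9.24 B
print(size_bits / params)          # -> ~8.5 bits per weight (small drift comes from the rounded inputs)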
|
llm_load_print_meta: general.name = Shieldgemma 9b |
|
llm_load_print_meta: BOS token = 2 '<bos>' |
|
llm_load_print_meta: EOS token = 1 '<eos>' |
|
llm_load_print_meta: UNK token = 3 '<unk>' |
|
llm_load_print_meta: PAD token = 0 '<pad>' |
|
llm_load_print_meta: LF token = 227 '<0x0A>' |
|
llm_load_print_meta: EOT token = 107 '<end_of_turn>' |
|
llm_load_print_meta: max token length = 48 |
|
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no |
|
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no |
|
ggml_cuda_init: found 1 CUDA devices: |
|
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes |
|
llm_load_tensors: ggml ctx size = 0.41 MiB |
|
llm_load_tensors: offloading 42 repeating layers to GPU |
|
llm_load_tensors: offloading non-repeating layers to GPU |
|
llm_load_tensors: offloaded 43/43 layers to GPU |
|
llm_load_tensors: CPU buffer size = 929.69 MiB |
|
llm_load_tensors: CUDA0 buffer size = 9366.12 MiB |
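
The CPU buffer size matches the token-embedding matrix at Q8_0 exactly, which suggests the embedding table is the part kept in host memory; a minimal check (an added note, assuming the standard Q8_0 layout of 34 bytes per block of 32 weights: 32 int8 values plus one f16 scale):

# Token-embedding size at Q8_0, using the vocab and embedding sizes from this log.
n_vocab, n_embd = 256000, 3584
embd_bytes = n_vocab * n_embd * 34 / 32
print(embd_bytes / 2**20)          # -> 929.6875 MiB, matching the CPU buffer size above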
|
.................................................................................... |
|
llama_new_context_with_model: n_ctx = 512 |
|
llama_new_context_with_model: n_batch = 512 |
|
llama_new_context_with_model: n_ubatch = 512 |
|
llama_new_context_with_model: flash_attn = 0 |
|
llama_new_context_with_model: freq_base = 10000.0 |
|
llama_new_context_with_model: freq_scale = 1 |
|
llama_kv_cache_init: CUDA0 KV buffer size = 168.00 MiB |
|
llama_new_context_with_model: KV self size = 168.00 MiB, K (f16): 84.00 MiB, V (f16): 84.00 MiB |
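
The 168 MiB KV cache follows from the context size, the GQA-reduced KV width, the layer count, and the f16 element size; a minimal check (an added note, not part of the original log):

# KV cache size: K and V are each n_ctx x n_embd_k_gqa per layer at f16 (2 bytes).
n_ctx, n_embd_kv_gqa, n_layer = 512, 2048, 42
k_bytes = n_ctx * n_embd_kv_gqa * n_layer * 2
print(k_bytes / 2**20)             # -> 84.0 MiB for K, the same for V, 168 MiB total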
|
llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB |
|
llama_new_context_with_model: CUDA0 compute buffer size = 507.00 MiB |
|
llama_new_context_with_model: CUDA_Host compute buffer size = 9.01 MiB |
|
llama_new_context_with_model: graph nodes = 1690 |
|
llama_new_context_with_model: graph splits = 2 |
|
|
|
system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | |
|
compute_imatrix: tokenizing the input .. |
|
compute_imatrix: tokenization took 127.069 ms |
|
compute_imatrix: computing over 128 chunks with batch_size 512 |
|
compute_imatrix: 0.83 seconds per pass - ETA 1.77 minutes |
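
The chunk accounting here is consistent with the timings at the end of the run: 128 chunks of 512 tokens is exactly the 65,536 tokens reported under prompt eval, and the ETA is the per-pass time multiplied by the chunk count (an added cross-check, not part of the original log):

# imatrix run accounting from the two lines above.
chunks, batch_size = 128, 512
print(chunks * batch_size)         # -> 65536 tokens, matching the prompt eval line below
print(0.83 * chunks / 60)          # -> ~1.77 minutes, matching the printed ETA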
|
[1]7.3374,[2]5.0984,[3]4.5613,[4]5.6956,[5]5.8577,[6]4.9333,[7]5.4379,[8]5.7817,[9]6.0045, |
|
save_imatrix: stored collected data after 10 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[10]5.3221,[11]5.4437,[12]6.0204,[13]6.5565,[14]6.7867,[15]7.3188,[16]7.6393,[17]7.7817,[18]8.1130,[19]7.7774, |
|
save_imatrix: stored collected data after 20 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[20]7.9485,[21]8.1086,[22]8.0617,[23]8.2047,[24]8.3050,[25]8.4663,[26]8.1978,[27]8.4473,[28]8.6323,[29]8.5557, |
|
save_imatrix: stored collected data after 30 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[30]8.4874,[31]7.9852,[32]7.7378,[33]7.6474,[34]7.5178,[35]7.4549,[36]7.4790,[37]7.4819,[38]7.5538,[39]7.7192, |
|
save_imatrix: stored collected data after 40 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[40]7.8833,[41]8.0263,[42]8.3116,[43]8.6135,[44]8.8847,[45]9.0421,[46]8.8968,[47]8.9272,[48]9.1215,[49]9.2696, |
|
save_imatrix: stored collected data after 50 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[50]9.0588,[51]9.0860,[52]9.1306,[53]9.2732,[54]9.4697,[55]9.5793,[56]9.6371,[57]9.6371,[58]9.6535,[59]9.4988, |
|
save_imatrix: stored collected data after 60 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[60]9.3814,[61]9.2582,[62]9.2137,[63]9.2545,[64]9.2488,[65]9.2318,[66]9.2642,[67]9.2104,[68]9.1374,[69]9.1585, |
|
save_imatrix: stored collected data after 70 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[70]9.1244,[71]9.1132,[72]9.1258,[73]9.1023,[74]9.0472,[75]9.0081,[76]9.0088,[77]9.0233,[78]9.0102,[79]8.9599, |
|
save_imatrix: stored collected data after 80 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[80]9.0174,[81]9.0655,[82]9.0441,[83]9.0438,[84]9.0963,[85]8.9669,[86]8.9308,[87]8.8686,[88]8.8771,[89]8.9035, |
|
save_imatrix: stored collected data after 90 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[90]8.9240,[91]8.8541,[92]8.7786,[93]8.6921,[94]8.6069,[95]8.5458,[96]8.4692,[97]8.4011,[98]8.3388,[99]8.3775, |
|
save_imatrix: stored collected data after 100 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[100]8.4012,[101]8.4919,[102]8.5715,[103]8.6479,[104]8.8050,[105]8.9196,[106]8.9416,[107]8.9679,[108]8.9875,[109]8.9641, |
|
save_imatrix: stored collected data after 110 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[110]8.9429,[111]8.8727,[112]8.7988,[113]8.8436,[114]8.8606,[115]8.8644,[116]8.8585,[117]8.9005,[118]8.9193,[119]8.9265, |
|
save_imatrix: stored collected data after 120 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
[120]8.9359,[121]8.9646,[122]8.9229,[123]8.9784,[124]9.0340,[125]9.0724,[126]9.1437,[127]9.2022,[128]9.2556, |
|
save_imatrix: stored collected data after 128 chunks in shieldgemma-9b-IMat-GGUF/imatrix.dat |
|
|
|
llama_print_timings: load time = 2647.18 ms |
|
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) |
|
llama_print_timings: prompt eval time = 87099.37 ms / 65536 tokens ( 1.33 ms per token, 752.43 tokens per second) |
|
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) |
|
llama_print_timings: total time = 90378.31 ms / 65537 tokens |
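
The throughput figure in the prompt eval line above is just tokens divided by wall-clock time (an added check, not part of the original log):

# Prompt-eval throughput from the timings above.
tokens, ms = 65536, 87099.37
print(tokens / (ms / 1000))        # -> ~752.43 tokens per second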
|
|
|
Final estimate: PPL = 9.2556 +/- 0.15057 |
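
For reference (an explanatory note, not part of the original log): the bracketed values in the chunk output above are the running perplexity estimate after that many chunks, which is why [128] 9.2556 equals the final estimate. Perplexity here is exp(-(1/N) * sum_i log p(token_i | preceding tokens)) over the N evaluated tokens, and the +/- term is the statistical uncertainty of that estimate.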
|
|