From f5f4cdef4e37dc127bdbef271f94a0aa686e240c Mon Sep 17 00:00:00 2001
From: Francis Couture-Harpin
Date: Sat, 24 Aug 2024 10:25:39 -0400
Subject: [PATCH] llama : fix qs.n_attention_wv for DeepSeek-V2

---
 src/llama.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 869b584aa2862..0593c484bb6fc 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -16820,7 +16820,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos ||
-            name.find("attn_qkv.weight") != std::string::npos) {
+            name.find("attn_qkv.weight") != std::string::npos ||
+            name.find("attn_kv_b.weight")!= std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
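
For context: qs.n_attention_wv counts the attention value-projection tensors
seen while quantizing, and llama.cpp later checks that count against the
number of attention layers and uses it when choosing per-layer quantization
types. DeepSeek-V2's MLA attention has no attn_v.weight; the value projection
lives inside attn_kv_b.weight, so before this patch the counter never
incremented for those models. The standalone C++ sketch below reproduces the
counting logic; the counts_as_wv() helper and the exact tensor-name list are
illustrative stand-ins (the names follow what I believe DeepSeek-V2-Lite uses
in llama.cpp), not the library's actual API:

// Sketch only: shows why the substring matcher undercounts for a
// DeepSeek-V2-style (MLA) layer without the attn_kv_b.weight case.
#include <string>
#include <vector>
#include <iostream>

// Stand-in for the matcher in llama_model_quantize_internal.
static bool counts_as_wv(const std::string & name, bool patched) {
    return name.find("attn_v.weight")   != std::string::npos
        || name.find("attn_qkv.weight") != std::string::npos
        || (patched && name.find("attn_kv_b.weight") != std::string::npos);
}

int main() {
    // One layer's attention tensors for an MLA model (illustrative names).
    const std::vector<std::string> tensors = {
        "blk.0.attn_kv_a_mqa.weight",
        "blk.0.attn_kv_b.weight",     // holds the decompressed K/V projection
        "blk.0.attn_q.weight",
        "blk.0.attn_output.weight",
    };
    for (bool patched : {false, true}) {
        int n_attention_wv = 0;
        for (const auto & name : tensors) {
            if (counts_as_wv(name, patched)) {
                ++n_attention_wv;
            }
        }
        std::cout << (patched ? "patched:   " : "unpatched: ")
                  << n_attention_wv << " of 1 expected\n";
    }
    return 0;
}

The unpatched matcher reports 0 of 1 expected for this layer, the patched one
1 of 1. The brittleness of matching on hardcoded name substrings is exactly
what the TODO in the hunk above is about (use the TN_* constants instead).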