From 8afe2280009ecbfc9de2c93b8f41283dc810609a Mon Sep 17 00:00:00 2001
From: Johannes Gäßler
Date: Wed, 30 Aug 2023 21:46:19 +0200
Subject: [PATCH] CUDA: mul_mat_q=true llama_context_params default (#2912)

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index fcd6f276a0655..95ee6ffe41c3a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5287,7 +5287,7 @@ struct llama_context_params llama_context_default_params() {
         /*.progress_callback           =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
         /*.low_vram                    =*/ false,
-        /*.mul_mat_q                   =*/ false,
+        /*.mul_mat_q                   =*/ true,
         /*.f16_kv                      =*/ true,
         /*.logits_all                  =*/ false,
         /*.vocab_only                  =*/ false,
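
Note: since this patch flips the default, callers that want the previous behaviour (the cuBLAS path instead of the custom mul_mat_q kernels) now have to opt out explicitly. Below is a minimal sketch of how that looks from the C API, assuming the llama.h entry points of this era (llama_backend_init, llama_load_model_from_file, llama_new_context_with_model); the model path is a placeholder, not something from this patch.

// Sketch: override the new mul_mat_q default when creating a context.
// Assumes the llama.h API as of this commit; "model.gguf" is a placeholder.
#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init(/*numa =*/ false);

    // After this patch, defaults come back with mul_mat_q == true.
    llama_context_params params = llama_context_default_params();
    params.mul_mat_q = false; // opt out explicitly to keep the old cuBLAS path

    llama_model * model = llama_load_model_from_file("model.gguf", params);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    llama_context * ctx = llama_new_context_with_model(model, params);
    // ... run inference ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}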