From a3ebc531fe68d72c75c4a328faff2ef569488248 Mon Sep 17 00:00:00 2001 From: Lumpiasty Date: Fri, 6 Mar 2026 23:21:58 +0100 Subject: [PATCH] Add Q3_K_M variant of Qwen3.5-9B --- apps/llama/configs/config.yaml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/apps/llama/configs/config.yaml b/apps/llama/configs/config.yaml index 8b757de..ae39649 100644 --- a/apps/llama/configs/config.yaml +++ b/apps/llama/configs/config.yaml @@ -677,3 +677,31 @@ models: --no-warmup --port ${PORT} --chat-template-kwargs "{\"enable_thinking\": false}" + + "Qwen3.5-9B-GGUF:Q3_K_M": + ttl: 600 + cmd: | + /app/llama-server + -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M + --ctx-size 16384 + --temp 0.6 + --top-p 0.95 + --top-k 20 + --min-p 0.00 + --no-warmup + --port ${PORT} + --chat-template-kwargs "{\"enable_thinking\": true}" + + "Qwen3.5-9B-GGUF-nothink:Q3_K_M": + ttl: 600 + cmd: | + /app/llama-server + -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M + --ctx-size 16384 + --temp 0.6 + --top-p 0.95 + --top-k 20 + --min-p 0.00 + --no-warmup + --port ${PORT} + --chat-template-kwargs "{\"enable_thinking\": false}"