From 46a7e24932c9a563b5f048dc3640a7fd2ae1cbc3 Mon Sep 17 00:00:00 2001
From: Lumpiasty
Date: Fri, 6 Mar 2026 23:07:02 +0100
Subject: [PATCH] add 2B, 4B, 9B versions of Qwen3.5 in thinking + nonthinking
 variants

---
 apps/llama/configs/config.yaml | 94 ++++++++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+)

diff --git a/apps/llama/configs/config.yaml b/apps/llama/configs/config.yaml
index 1ae1384..c7fde83 100644
--- a/apps/llama/configs/config.yaml
+++ b/apps/llama/configs/config.yaml
@@ -579,3 +579,97 @@ models:
       --no-warmup
       --port ${PORT}
       --chat-template-kwargs "{\"enable_thinking\": false}"
+
+  "Qwen3.5-0.8B-GGUF:Q4_K_XL":
+    ttl: 0
+    cmd: |
+      /app/llama-server
+      -hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
+      --ctx-size 16384
+      --temp 0.6
+      --top-p 0.95
+      --top-k 20
+      --min-p 0.00
+      --no-warmup
+      --port ${PORT}
+
+  "Qwen3.5-2B-GGUF:Q4_K_M":
+    ttl: 600
+    cmd: |
+      /app/llama-server
+      -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
+      --ctx-size 16384
+      --temp 0.6
+      --top-p 0.95
+      --top-k 20
+      --min-p 0.00
+      --no-warmup
+      --port ${PORT}
+
+  "Qwen3.5-2B-GGUF-nothink:Q4_K_M":
+    ttl: 600
+    cmd: |
+      /app/llama-server
+      -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
+      --ctx-size 16384
+      --temp 0.6
+      --top-p 0.95
+      --top-k 20
+      --min-p 0.00
+      --no-warmup
+      --port ${PORT}
+      --chat-template-kwargs "{\"enable_thinking\": false}"
+
+  "Qwen3.5-4B-GGUF:Q4_K_M":
+    ttl: 600
+    cmd: |
+      /app/llama-server
+      -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
+      --ctx-size 16384
+      --temp 0.6
+      --top-p 0.95
+      --top-k 20
+      --min-p 0.00
+      --no-warmup
+      --port ${PORT}
+
+  "Qwen3.5-4B-GGUF-nothink:Q4_K_M":
+    ttl: 600
+    cmd: |
+      /app/llama-server
+      -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
+      --ctx-size 16384
+      --temp 0.6
+      --top-p 0.95
+      --top-k 20
+      --min-p 0.00
+      --no-warmup
+      --port ${PORT}
+      --chat-template-kwargs "{\"enable_thinking\": false}"
+
+  "Qwen3.5-9B-GGUF:Q4_K_M":
+    ttl: 600
+    cmd: |
+      /app/llama-server
+      -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
+      --ctx-size 16384
+      --temp 0.6
+      --top-p 0.95
+      --top-k 20
+      --min-p 0.00
+      --no-warmup
+      --port ${PORT}
+
+  "Qwen3.5-9B-GGUF-nothink:Q4_K_M":
+    ttl: 600
+    cmd: |
+      /app/llama-server
+      -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
+      --ctx-size 16384
+      --temp 0.6
+      --top-p 0.95
+      --top-k 20
+      --min-p 0.00
+      --no-warmup
+      --port ${PORT}
+      --chat-template-kwargs "{\"enable_thinking\": false}"