add 0.8B, 2B, 4B, 9B versions of Qwen3.5 (thinking + nonthinking variants for 2B/4B/9B; 0.8B thinking only)

This commit is contained in:
2026-03-06 23:07:02 +01:00
parent cd7ebac6b9
commit 46a7e24932

View File

@@ -579,3 +579,97 @@ models:
--no-warmup
--port ${PORT}
--chat-template-kwargs "{\"enable_thinking\": false}"
# Qwen3.5 0.8B, thinking mode (default chat template; no enable_thinking override).
# Model is fetched from Hugging Face via llama-server's -hf flag.
# NOTE(review): this entry differs from its siblings — ttl 0 (vs 600) and quant
# Q4_K_XL (vs Q4_K_M). Confirm both are intentional for the smallest model.
"Qwen3.5-0.8B-GGUF:Q4_K_XL":
ttl: 0
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
--ctx-size 16384
--temp 0.6
--top-p 0.95
--top-k 20
--min-p 0.00
--no-warmup
--port ${PORT}
# Qwen3.5 2B, thinking mode (default chat template).
# ttl 600: presumably seconds of idle time before llama-swap unloads the model —
# confirm against llama-swap docs.
"Qwen3.5-2B-GGUF:Q4_K_M":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
--ctx-size 16384
--temp 0.6
--top-p 0.95
--top-k 20
--min-p 0.00
--no-warmup
--port ${PORT}
# Qwen3.5 2B, non-thinking variant: same weights as the thinking entry, but the
# chat template is invoked with enable_thinking=false.
# NOTE(review): sampling params (temp 0.6 / top-p 0.95) are copied from the
# thinking entries; Qwen's published non-thinking preset for Qwen3 was
# temp 0.7 / top-p 0.8 — confirm this is intended for Qwen3.5.
"Qwen3.5-2B-GGUF-nothink:Q4_K_M":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
--ctx-size 16384
--temp 0.6
--top-p 0.95
--top-k 20
--min-p 0.00
--no-warmup
--port ${PORT}
--chat-template-kwargs "{\"enable_thinking\": false}"
# Qwen3.5 4B, thinking mode (default chat template). Same launch flags as the
# 2B/9B thinking entries; only the -hf repo differs.
"Qwen3.5-4B-GGUF:Q4_K_M":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
--ctx-size 16384
--temp 0.6
--top-p 0.95
--top-k 20
--min-p 0.00
--no-warmup
--port ${PORT}
# Qwen3.5 4B, non-thinking variant: same weights as the thinking entry, with
# the chat template invoked with enable_thinking=false.
# NOTE(review): sampling params mirror the thinking preset (temp 0.6 /
# top-p 0.95); confirm that is intended for non-thinking use.
"Qwen3.5-4B-GGUF-nothink:Q4_K_M":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
--ctx-size 16384
--temp 0.6
--top-p 0.95
--top-k 20
--min-p 0.00
--no-warmup
--port ${PORT}
--chat-template-kwargs "{\"enable_thinking\": false}"
# Qwen3.5 9B, thinking mode (default chat template). Same launch flags as the
# 2B/4B thinking entries; only the -hf repo differs.
"Qwen3.5-9B-GGUF:Q4_K_M":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
--ctx-size 16384
--temp 0.6
--top-p 0.95
--top-k 20
--min-p 0.00
--no-warmup
--port ${PORT}
# Qwen3.5 9B, non-thinking variant: same weights as the thinking entry, with
# the chat template invoked with enable_thinking=false.
# NOTE(review): sampling params mirror the thinking preset (temp 0.6 /
# top-p 0.95); confirm that is intended for non-thinking use.
"Qwen3.5-9B-GGUF-nothink:Q4_K_M":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
--ctx-size 16384
--temp 0.6
--top-p 0.95
--top-k 20
--min-p 0.00
--no-warmup
--port ${PORT}
--chat-template-kwargs "{\"enable_thinking\": false}"