diff --git a/apps/llama/configs/config.yaml b/apps/llama/configs/config.yaml
index 6f5fb40..17a2f9d 100644
--- a/apps/llama/configs/config.yaml
+++ b/apps/llama/configs/config.yaml
@@ -6,6 +6,7 @@ macros:
   base_args: "--no-warmup --port ${PORT}"
   common_args: "--fit-target 1536 --no-warmup --port ${PORT}"
   gemma3_ctx_128k: "--ctx-size 131072"
+  qwen35_ctx_128k: "--ctx-size 131072"
   qwen35_ctx_256k: "--ctx-size 262144"
   gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
   qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0"
@@ -163,7 +164,7 @@ models:
     cmd: |
       /app/llama-server
       -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
-      ${qwen35_ctx_256k}
+      ${qwen35_ctx_128k}
       ${qwen35_sampling}
       ${common_args}
       ${thinking_on}
@@ -172,7 +173,7 @@ models:
     cmd: |
       /app/llama-server
       -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
-      ${qwen35_ctx_256k}
+      ${qwen35_ctx_128k}
       ${qwen35_sampling}
       ${common_args}
       ${thinking_off}
@@ -182,7 +183,7 @@ models:
       /app/llama-server
       -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
       ${qwen35_4b_heretic_mmproj}
-      ${qwen35_ctx_256k}
+      ${qwen35_ctx_128k}
       ${qwen35_sampling}
       ${common_args}
       ${thinking_on}
@@ -192,7 +193,7 @@ models:
       /app/llama-server
       -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
       ${qwen35_4b_heretic_mmproj}
-      ${qwen35_ctx_256k}
+      ${qwen35_ctx_128k}
       ${qwen35_sampling}
       ${common_args}
       ${thinking_off}