diff --git a/apps/llama/configs/config.yaml b/apps/llama/configs/config.yaml
index 1f29f3b..5bbef19 100644
--- a/apps/llama/configs/config.yaml
+++ b/apps/llama/configs/config.yaml
@@ -4,10 +4,10 @@ logToStdout: "both" # proxy and upstream
 macros:
   base_args: "--no-warmup --port ${PORT}"
-  common_args: "--fit-target 1536 --fit-ctx 32768 --no-warmup --port ${PORT}"
+  common_args: "--fit-target 1536 --fit-ctx 65536 --no-warmup --port ${PORT}"
   gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
-  qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00"
-  qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20"
+  qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q4_0 -ctv q4_0"
+  qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q4_0 -ctv q4_0"
   qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf"
   qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf"
   thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'"
 
@@ -75,6 +75,7 @@ models:
       --top-p 0.95
       --top-k 40
       --repeat-penalty 1.0
+      -ctk q4_0 -ctv q4_0
       ${common_args}
 
   "Qwen3.5-35B-A3B-GGUF:Q4_K_M":