# yaml-language-server: $schema=https://raw.githubusercontent.com/mostlygeek/llama-swap/refs/heads/main/config-schema.json

healthCheckTimeout: 600
logToStdout: "both"  # proxy and upstream

macros:
  # Minimal args: fixed port only (for tiny always-on models with explicit ctx).
  base_args: "--no-warmup --port ${PORT}"
  # Default args: auto-fit VRAM target and context window.
  common_args: "--fit-target 1536 --fit-ctx 32768 --no-warmup --port ${PORT}"
  gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
  qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00"
  qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20"
  qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf"
  qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf"
  thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'"
  thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'"

peers:
  openrouter:
    proxy: https://openrouter.ai/api
    apiKey: ${env.OPENROUTER_API_KEY}
    models:
      - z-ai/glm-5

hooks:
  on_startup:
    preload:
      - "Qwen3.5-0.8B-GGUF-nothink:Q4_K_XL"

groups:
  # Keep the small always-on model resident; it never swaps out.
  always:
    persistent: true
    exclusive: false
    swap: false
    members:
      - "Qwen3.5-0.8B-GGUF-nothink:Q4_K_XL"

models:
  "gemma3-12b":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
      ${gemma_sampling}
      ${common_args}

  "gemma3-12b-novision":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
      ${gemma_sampling}
      --no-mmproj
      ${common_args}

  "gemma3-4b":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
      ${gemma_sampling}
      ${common_args}

  "gemma3-4b-novision":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
      ${gemma_sampling}
      --no-mmproj
      ${common_args}

  "Qwen3-Coder-Next-GGUF:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3-Coder-Next-GGUF:Q4_K_M
      --ctx-size 65536 --predict 8192
      --temp 1.0 --min-p 0.01 --top-p 0.95 --top-k 40 --repeat-penalty 1.0
      ${common_args}

  "Qwen3.5-35B-A3B-GGUF:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
      ${qwen35_35b_args}
      ${common_args}

  "Qwen3.5-35B-A3B-GGUF-nothink:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
      ${qwen35_35b_args}
      ${common_args}
      ${thinking_off}

  # The "heretic" version does not provide the mmproj
  # so providing url to the one from the non-heretic version.
  "Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
      ${qwen35_35b_heretic_mmproj}
      ${qwen35_35b_args}
      ${common_args}

  "Qwen3.5-35B-A3B-heretic-GGUF-nothink:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
      ${qwen35_35b_heretic_mmproj}
      ${qwen35_35b_args}
      ${common_args}
      ${thinking_off}

  "Qwen3.5-0.8B-GGUF:Q4_K_XL":
    ttl: 0
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
      ${qwen35_sampling}
      ${base_args}
      ${thinking_on}

  "Qwen3.5-0.8B-GGUF-nothink:Q4_K_XL":
    ttl: 0
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
      --ctx-size 4096
      ${qwen35_sampling}
      ${base_args}
      ${thinking_off}

  "Qwen3.5-2B-GGUF:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_on}

  "Qwen3.5-2B-GGUF-nothink:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_off}

  "Qwen3.5-4B-GGUF:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_on}

  "Qwen3.5-4B-GGUF-nothink:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_off}

  "Qwen3.5-4B-heretic-GGUF:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
      ${qwen35_4b_heretic_mmproj}
      ${qwen35_sampling}
      ${common_args}
      ${thinking_on}

  "Qwen3.5-4B-heretic-GGUF-nothink:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
      ${qwen35_4b_heretic_mmproj}
      ${qwen35_sampling}
      ${common_args}
      ${thinking_off}

  "Qwen3.5-9B-GGUF:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_on}

  "Qwen3.5-9B-GGUF-nothink:Q4_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_off}

  "Qwen3.5-9B-GGUF:Q3_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_on}

  "Qwen3.5-9B-GGUF-nothink:Q3_K_M":
    ttl: 600
    cmd: |
      /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
      ${qwen35_sampling}
      ${common_args}
      ${thinking_off}