add gemma 4 models
All checks were successful
ci/woodpecker/push/flux-reconcile-source Pipeline was successful

This commit is contained in:
2026-04-04 02:48:02 +02:00
parent ad3b2229c2
commit 8160a52176

View File

@@ -5,15 +5,15 @@ logToStdout: "both" # proxy and upstream
macros: macros:
base_args: "--no-warmup --port ${PORT}" base_args: "--no-warmup --port ${PORT}"
common_args: "--fit-target 1536 --no-warmup --port ${PORT}" common_args: "--fit-target 1536 --no-warmup --port ${PORT}"
gemma3_ctx_128k: "--ctx-size 131072" ctx_128k: "--ctx-size 131072"
qwen35_ctx_128k: "--ctx-size 131072" ctx_256k: "--ctx-size 262144"
qwen35_ctx_256k: "--ctx-size 262144"
gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95" gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0" qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0"
qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q8_0 -ctv q8_0" qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q8_0 -ctv q8_0"
qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf" qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf"
qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf" qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf"
glm47_flash_args: "--temp 0.7 --top-p 1.0 --min-p 0.01 --repeat-penalty 1.0" glm47_flash_args: "--temp 0.7 --top-p 1.0 --min-p 0.01 --repeat-penalty 1.0"
gemma4_sampling: "--temp 1.0 --top-p 0.95 --top-k 64"
thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'" thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'"
thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'" thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'"
@@ -35,7 +35,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M -hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${gemma3_ctx_128k} ${ctx_128k}
${gemma_sampling} ${gemma_sampling}
${common_args} ${common_args}
@@ -43,7 +43,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M -hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${gemma3_ctx_128k} ${ctx_128k}
${gemma_sampling} ${gemma_sampling}
--no-mmproj --no-mmproj
${common_args} ${common_args}
@@ -52,7 +52,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${gemma3_ctx_128k} ${ctx_128k}
${gemma_sampling} ${gemma_sampling}
${common_args} ${common_args}
@@ -60,7 +60,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${gemma3_ctx_128k} ${ctx_128k}
${gemma_sampling} ${gemma_sampling}
--no-mmproj --no-mmproj
${common_args} ${common_args}
@@ -83,7 +83,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_35b_args} ${qwen35_35b_args}
${common_args} ${common_args}
@@ -91,7 +91,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_35b_args} ${qwen35_35b_args}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -103,7 +103,7 @@ models:
/app/llama-server /app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M -hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj} ${qwen35_35b_heretic_mmproj}
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_35b_args} ${qwen35_35b_args}
${common_args} ${common_args}
@@ -112,7 +112,7 @@ models:
/app/llama-server /app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M -hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj} ${qwen35_35b_heretic_mmproj}
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_35b_args} ${qwen35_35b_args}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -121,7 +121,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL -hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${base_args} ${base_args}
${thinking_on} ${thinking_on}
@@ -139,7 +139,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_on} ${thinking_on}
@@ -148,7 +148,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -157,7 +157,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${qwen35_ctx_128k} ${ctx_128k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_on} ${thinking_on}
@@ -166,7 +166,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${qwen35_ctx_128k} ${ctx_128k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -176,7 +176,7 @@ models:
/app/llama-server /app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj} ${qwen35_4b_heretic_mmproj}
${qwen35_ctx_128k} ${ctx_128k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_on} ${thinking_on}
@@ -186,7 +186,7 @@ models:
/app/llama-server /app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj} ${qwen35_4b_heretic_mmproj}
${qwen35_ctx_128k} ${ctx_128k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -195,7 +195,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_on} ${thinking_on}
@@ -204,7 +204,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -213,7 +213,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_on} ${thinking_on}
@@ -222,7 +222,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -231,7 +231,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M -hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_on} ${thinking_on}
@@ -240,7 +240,7 @@ models:
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M -hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${qwen35_ctx_256k} ${ctx_256k}
${qwen35_sampling} ${qwen35_sampling}
${common_args} ${common_args}
${thinking_off} ${thinking_off}
@@ -251,3 +251,35 @@ models:
-hf unsloth/GLM-4.7-Flash-GGUF:Q4_K_M -hf unsloth/GLM-4.7-Flash-GGUF:Q4_K_M
${glm47_flash_args} ${glm47_flash_args}
${common_args} ${common_args}
"gemma-4-26B-A4B-it:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q4_K_XL
${ctx_256k}
${gemma4_sampling}
${common_args}
"gemma-4-26B-A4B-it:UD-Q2_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q2_K_XL
${ctx_256k}
${gemma4_sampling}
${common_args}
"gemma-4-E4B-it:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL
${ctx_128k}
${gemma4_sampling}
${common_args}
"gemma-4-E2B-it:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL
${ctx_128k}
${gemma4_sampling}
${common_args}