From 8160a52176f2abb8948e97459f44225e26ccb053 Mon Sep 17 00:00:00 2001 From: Lumpiasty Date: Sat, 4 Apr 2026 02:48:02 +0200 Subject: [PATCH] add gemma 4 models --- apps/llama/configs/config.yaml | 80 ++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 24 deletions(-) diff --git a/apps/llama/configs/config.yaml b/apps/llama/configs/config.yaml index 59fb4a6..3661837 100644 --- a/apps/llama/configs/config.yaml +++ b/apps/llama/configs/config.yaml @@ -5,15 +5,15 @@ logToStdout: "both" # proxy and upstream macros: base_args: "--no-warmup --port ${PORT}" common_args: "--fit-target 1536 --no-warmup --port ${PORT}" - gemma3_ctx_128k: "--ctx-size 131072" - qwen35_ctx_128k: "--ctx-size 131072" - qwen35_ctx_256k: "--ctx-size 262144" + ctx_128k: "--ctx-size 131072" + ctx_256k: "--ctx-size 262144" gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95" qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0" qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q8_0 -ctv q8_0" qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf" qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf" glm47_flash_args: "--temp 0.7 --top-p 1.0 --min-p 0.01 --repeat-penalty 1.0" + gemma4_sampling: "--temp 1.0 --top-p 0.95 --top-k 64" thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'" thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'" @@ -35,7 +35,7 @@ models: cmd: | /app/llama-server -hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M - ${gemma3_ctx_128k} + ${ctx_128k} ${gemma_sampling} ${common_args} @@ -43,7 +43,7 @@ models: cmd: | /app/llama-server -hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M - 
${gemma3_ctx_128k} + ${ctx_128k} ${gemma_sampling} --no-mmproj ${common_args} @@ -52,7 +52,7 @@ models: cmd: | /app/llama-server -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M - ${gemma3_ctx_128k} + ${ctx_128k} ${gemma_sampling} ${common_args} @@ -60,7 +60,7 @@ models: cmd: | /app/llama-server -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M - ${gemma3_ctx_128k} + ${ctx_128k} ${gemma_sampling} --no-mmproj ${common_args} @@ -83,7 +83,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_35b_args} ${common_args} @@ -91,7 +91,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_35b_args} ${common_args} ${thinking_off} @@ -103,7 +103,7 @@ models: /app/llama-server -hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M ${qwen35_35b_heretic_mmproj} - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_35b_args} ${common_args} @@ -112,7 +112,7 @@ models: /app/llama-server -hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M ${qwen35_35b_heretic_mmproj} - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_35b_args} ${common_args} ${thinking_off} @@ -121,7 +121,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${base_args} ${thinking_on} @@ -139,7 +139,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_on} @@ -148,7 +148,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_off} @@ -157,7 +157,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M - ${qwen35_ctx_128k} + ${ctx_128k} ${qwen35_sampling} ${common_args} ${thinking_on} @@ -166,7 +166,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M - ${qwen35_ctx_128k} + ${ctx_128k} ${qwen35_sampling} ${common_args} 
${thinking_off} @@ -176,7 +176,7 @@ models: /app/llama-server -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M ${qwen35_4b_heretic_mmproj} - ${qwen35_ctx_128k} + ${ctx_128k} ${qwen35_sampling} ${common_args} ${thinking_on} @@ -186,7 +186,7 @@ models: /app/llama-server -hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M ${qwen35_4b_heretic_mmproj} - ${qwen35_ctx_128k} + ${ctx_128k} ${qwen35_sampling} ${common_args} ${thinking_off} @@ -195,7 +195,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_on} @@ -204,7 +204,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_off} @@ -213,7 +213,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_on} @@ -222,7 +222,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_off} @@ -231,7 +231,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_on} @@ -240,7 +240,7 @@ models: cmd: | /app/llama-server -hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M - ${qwen35_ctx_256k} + ${ctx_256k} ${qwen35_sampling} ${common_args} ${thinking_off} @@ -251,3 +251,35 @@ models: -hf unsloth/GLM-4.7-Flash-GGUF:Q4_K_M ${glm47_flash_args} ${common_args} + + "gemma-4-26B-A4B-it:UD-Q4_K_XL": + cmd: | + /app/llama-server + -hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q4_K_XL + ${ctx_256k} + ${gemma4_sampling} + ${common_args} + + "gemma-4-26B-A4B-it:UD-Q2_K_XL": + cmd: | + /app/llama-server + -hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q2_K_XL + ${ctx_256k} + ${gemma4_sampling} + ${common_args} + + "gemma-4-E4B-it:UD-Q4_K_XL": + cmd: | + /app/llama-server
+ -hf unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL + ${ctx_128k} + ${gemma4_sampling} + ${common_args} + + "gemma-4-E2B-it:UD-Q4_K_XL": + cmd: | + /app/llama-server + -hf unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL + ${ctx_128k} + ${gemma4_sampling} + ${common_args}