14 Commits

6 changed files with 107 additions and 11 deletions

View File

@@ -18,7 +18,7 @@ spec:
chart: chart:
spec: spec:
chart: immich chart: immich
version: 0.7.0 version: 0.7.1
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: secustor name: secustor
@@ -58,6 +58,7 @@ spec:
annotations: annotations:
cert-manager.io/cluster-issuer: letsencrypt cert-manager.io/cluster-issuer: letsencrypt
nginx.org/client-max-body-size: "0" nginx.org/client-max-body-size: "0"
nginx.org/websocket-services: immich-server
hosts: hosts:
- host: immich.lumpiasty.xyz - host: immich.lumpiasty.xyz
paths: paths:

View File

@@ -9,5 +9,5 @@ spec:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 50Gi storage: 150Gi
storageClassName: mayastor-single-hdd storageClassName: mayastor-single-hdd

View File

@@ -2,11 +2,11 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository kind: HelmRepository
metadata: metadata:
name: bat-librechat name: dynomite567-charts
namespace: librechat namespace: librechat
spec: spec:
interval: 24h interval: 24h
url: https://charts.blue-atlas.de url: https://dynomite567.github.io/helm-charts/
--- ---
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
@@ -18,10 +18,10 @@ spec:
chart: chart:
spec: spec:
chart: librechat chart: librechat
version: 1.8.10 version: 1.8.9
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: bat-librechat name: dynomite567-charts
values: values:
global: global:
librechat: librechat:
@@ -61,13 +61,18 @@ spec:
modelDisplayLabel: "Ollama" modelDisplayLabel: "Ollama"
- name: "Llama.cpp" - name: "Llama.cpp"
apiKey: "llama" apiKey: "llama"
baseURL: "http://llama.llama.svc.cluster.local:11434/v1/chat/completions" baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
models: models:
default: [ default: [
"DeepSeek-R1-0528-Qwen3-8B-GGUF", "DeepSeek-R1-0528-Qwen3-8B-GGUF",
"Qwen3-8B-GGUF", "Qwen3-8B-GGUF",
"Qwen3-8B-GGUF-no-thinking", "Qwen3-8B-GGUF-no-thinking",
"gemma3n" "gemma3n-e4b",
"gemma3-12b",
"gemma3-12b-q2",
"gemma3-12b-novision",
"gemma3-4b",
"gemma3-4b-novision"
] ]
titleConvo: true titleConvo: true
titleModel: "current_model" titleModel: "current_model"
@@ -85,6 +90,9 @@ spec:
className: nginx className: nginx
annotations: annotations:
cert-manager.io/cluster-issuer: letsencrypt cert-manager.io/cluster-issuer: letsencrypt
nginx.org/client-max-body-size: "0"
        nginx.org/proxy-buffering: "false"
nginx.org/proxy-read-timeout: 30m
hosts: hosts:
- host: librechat.lumpiasty.xyz - host: librechat.lumpiasty.xyz
paths: paths:

View File

@@ -1,24 +1,33 @@
healthCheckTimeout: 600
models: models:
"DeepSeek-R1-0528-Qwen3-8B-GGUF": "DeepSeek-R1-0528-Qwen3-8B-GGUF":
ttl: 600
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M -hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384 -ngl 37 -c 16384
--no-warmup
--port ${PORT} --port ${PORT}
"Qwen3-8B-GGUF": "Qwen3-8B-GGUF":
ttl: 600
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M -hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384 -ngl 37 -c 16384
--no-warmup
--port ${PORT} --port ${PORT}
"Qwen3-8B-GGUF-no-thinking": "Qwen3-8B-GGUF-no-thinking":
ttl: 600
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M -hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384 -ngl 37 -c 16384
--jinja --chat-template-file /config/qwen_nothink_chat_template.jinja --jinja --chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT} --port ${PORT}
"gemma3n": "gemma3n-e3b":
ttl: 600
cmd: | cmd: |
/app/llama-server /app/llama-server
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL -hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
@@ -31,4 +40,82 @@ models:
--min-p 0.00 --min-p 0.00
--top-k 64 --top-k 64
--top-p 0.95 --top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"gemma3-12b-q2":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT} --port ${PORT}

View File

@@ -18,7 +18,7 @@ spec:
chart: chart:
spec: spec:
chart: ollama chart: ollama
version: 1.24.0 version: 1.25.0
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: ollama-helm name: ollama-helm

View File

@@ -15,7 +15,7 @@ spec:
- name: renovate - name: renovate
# Update this to the latest available and then enable Renovate on # Update this to the latest available and then enable Renovate on
# the manifest # the manifest
image: renovate/renovate:41.43.5-full image: renovate/renovate:41.51.0-full
envFrom: envFrom:
- secretRef: - secretRef:
name: renovate-gitea-token name: renovate-gitea-token