Disable the librechat release; it's using Bitnami's MongoDB

This commit is contained in:
2026-01-17 22:55:28 +01:00
parent d2cfd7b73d
commit b6efe42dc2

View File

@@ -8,113 +8,113 @@ spec:
   interval: 24h
   url: https://dynomite567.github.io/helm-charts/
 ---
-apiVersion: helm.toolkit.fluxcd.io/v2
-kind: HelmRelease
-metadata:
-  name: librechat
-  namespace: librechat
-spec:
-  interval: 30m
-  chart:
-    spec:
-      chart: librechat
-      version: 1.9.1
-      sourceRef:
-        kind: HelmRepository
-        name: dynomite567-charts
-  values:
-    global:
-      librechat:
-        existingSecretName: librechat
-    librechat:
-      configEnv:
-        PLUGIN_MODELS: null
-        ALLOW_REGISTRATION: "false"
-        TRUST_PROXY: "1"
-        DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
-        SEARCH: "true"
-      existingSecretName: librechat
-      configYamlContent: |
-        version: 1.0.3
-        endpoints:
-          custom:
-            - name: "Llama.cpp"
-              apiKey: "llama"
-              baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
-              models:
-                default: [
-                  "DeepSeek-R1-0528-Qwen3-8B-GGUF",
-                  "Qwen3-8B-GGUF",
-                  "Qwen3-8B-GGUF-no-thinking",
-                  "gemma3n-e4b",
-                  "gemma3-12b",
-                  "gemma3-12b-q2",
-                  "gemma3-12b-novision",
-                  "gemma3-4b",
-                  "gemma3-4b-novision",
-                  "Qwen3-4B-Thinking-2507",
-                  "Qwen3-4B-Thinking-2507-long-ctx",
-                  "Qwen2.5-VL-7B-Instruct-GGUF",
-                  "Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
-                  "Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L",
-                  "Qwen3-VL-2B-Instruct-GGUF",
-                  "Qwen3-VL-2B-Instruct-GGUF-unslothish",
-                  "Qwen3-VL-2B-Thinking-GGUF",
-                  "Qwen3-VL-4B-Instruct-GGUF",
-                  "Qwen3-VL-4B-Instruct-GGUF-unslothish",
-                  "Qwen3-VL-4B-Thinking-GGUF",
-                  "Qwen3-VL-8B-Instruct-GGUF",
-                  "Qwen3-VL-8B-Instruct-GGUF-unslothish",
-                  "Qwen3-VL-8B-Thinking-GGUF",
-                  "Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF",
-                  "Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF"
-                ]
-              titleConvo: true
-              titleModel: "gemma3-4b-novision"
-              summarize: false
-              summaryModel: "gemma3-4b-novision"
-              forcePrompt: false
-              modelDisplayLabel: "Llama.cpp"
-              # ✨ IMPORTANT: let llama-swap/llama-server own all these
-              dropParams:
-                - "temperature"
-                - "top_p"
-                - "top_k"
-                - "presence_penalty"
-                - "frequency_penalty"
-                - "stop"
-                - "max_tokens"
-      imageVolume:
-        enabled: true
-        size: 10G
-        accessModes: ReadWriteOnce
-        storageClassName: mayastor-single-hdd
-      ingress:
-        enabled: true
-        className: nginx-ingress
-        annotations:
-          cert-manager.io/cluster-issuer: letsencrypt
-          nginx.ingress.kubernetes.io/proxy-body-size: "0"
-          nginx.ingress.kubernetes.io/proxy-buffering: "false"
-          nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
-        hosts:
-          - host: librechat.lumpiasty.xyz
-            paths:
-              - path: /
-                pathType: ImplementationSpecific
-        tls:
-          - hosts:
-              - librechat.lumpiasty.xyz
-            secretName: librechat-ingress
-    mongodb:
-      persistence:
-        storageClass: mayastor-single-hdd
-    meilisearch:
-      persistence:
-        storageClass: mayastor-single-hdd
-      auth:
-        existingMasterKeySecret: librechat
+# apiVersion: helm.toolkit.fluxcd.io/v2
+# kind: HelmRelease
+# metadata:
+#   name: librechat
+#   namespace: librechat
+# spec:
+#   interval: 30m
+#   chart:
+#     spec:
+#       chart: librechat
+#       version: 1.9.1
+#       sourceRef:
+#         kind: HelmRepository
+#         name: dynomite567-charts
+#   values:
+#     global:
+#       librechat:
+#         existingSecretName: librechat
+#     librechat:
+#       configEnv:
+#         PLUGIN_MODELS: null
+#         ALLOW_REGISTRATION: "false"
+#         TRUST_PROXY: "1"
+#         DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
+#         SEARCH: "true"
+#       existingSecretName: librechat
+#       configYamlContent: |
+#         version: 1.0.3
+#         endpoints:
+#           custom:
+#             - name: "Llama.cpp"
+#               apiKey: "llama"
+#               baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
+#               models:
+#                 default: [
+#                   "DeepSeek-R1-0528-Qwen3-8B-GGUF",
+#                   "Qwen3-8B-GGUF",
+#                   "Qwen3-8B-GGUF-no-thinking",
+#                   "gemma3n-e4b",
+#                   "gemma3-12b",
+#                   "gemma3-12b-q2",
+#                   "gemma3-12b-novision",
+#                   "gemma3-4b",
+#                   "gemma3-4b-novision",
+#                   "Qwen3-4B-Thinking-2507",
+#                   "Qwen3-4B-Thinking-2507-long-ctx",
+#                   "Qwen2.5-VL-7B-Instruct-GGUF",
+#                   "Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
+#                   "Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L",
+#                   "Qwen3-VL-2B-Instruct-GGUF",
+#                   "Qwen3-VL-2B-Instruct-GGUF-unslothish",
+#                   "Qwen3-VL-2B-Thinking-GGUF",
+#                   "Qwen3-VL-4B-Instruct-GGUF",
+#                   "Qwen3-VL-4B-Instruct-GGUF-unslothish",
+#                   "Qwen3-VL-4B-Thinking-GGUF",
+#                   "Qwen3-VL-8B-Instruct-GGUF",
+#                   "Qwen3-VL-8B-Instruct-GGUF-unslothish",
+#                   "Qwen3-VL-8B-Thinking-GGUF",
+#                   "Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF",
+#                   "Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF"
+#                 ]
+#               titleConvo: true
+#               titleModel: "gemma3-4b-novision"
+#               summarize: false
+#               summaryModel: "gemma3-4b-novision"
+#               forcePrompt: false
+#               modelDisplayLabel: "Llama.cpp"
+#               # ✨ IMPORTANT: let llama-swap/llama-server own all these
+#               dropParams:
+#                 - "temperature"
+#                 - "top_p"
+#                 - "top_k"
+#                 - "presence_penalty"
+#                 - "frequency_penalty"
+#                 - "stop"
+#                 - "max_tokens"
+#       imageVolume:
+#         enabled: true
+#         size: 10G
+#         accessModes: ReadWriteOnce
+#         storageClassName: mayastor-single-hdd
+#       ingress:
+#         enabled: true
+#         className: nginx-ingress
+#         annotations:
+#           cert-manager.io/cluster-issuer: letsencrypt
+#           nginx.ingress.kubernetes.io/proxy-body-size: "0"
+#           nginx.ingress.kubernetes.io/proxy-buffering: "false"
+#           nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
+#         hosts:
+#           - host: librechat.lumpiasty.xyz
+#             paths:
+#               - path: /
+#                 pathType: ImplementationSpecific
+#         tls:
+#           - hosts:
+#               - librechat.lumpiasty.xyz
+#             secretName: librechat-ingress
+#     mongodb:
+#       persistence:
+#         storageClass: mayastor-single-hdd
+#     meilisearch:
+#       persistence:
+#         storageClass: mayastor-single-hdd
+#       auth:
+#         existingMasterKeySecret: librechat