Commits

f0c69e0d86 Update renovate/renovate Docker tag to v41.43.3 (2025-07-26 00:03:26 +00:00)
50 changed files with 283 additions and 652 deletions

View File

@@ -107,8 +107,8 @@ spec:
# Stubborn false positive
mask: 0.739,0.725,0.856,0.76,0.862,0.659,0.746,0.614
# ffmpeg:
# hwaccel_args: preset-vaapi
ffmpeg:
hwaccel_args: preset-vaapi
detectors:
ov_0:
@@ -139,7 +139,7 @@ spec:
skipuninstall: true
config:
enabled: true
size: 5Gi
size: 1Gi
storageClass: mayastor-single-hdd
skipuninstall: true
envFromSecrets:
@@ -147,9 +147,9 @@ spec:
ingress:
enabled: true
ingressClassName: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.org/websocket-services: frigate
hosts:
- host: frigate.lumpiasty.xyz
paths:
@@ -164,16 +164,16 @@ spec:
kubernetes.io/hostname: anapistula-delrosalae
# GPU access
# extraVolumes:
# - name: dri
# hostPath:
# path: /dev/dri/renderD128
# type: CharDevice
extraVolumes:
- name: dri
hostPath:
path: /dev/dri/renderD128
type: CharDevice
# extraVolumeMounts:
# - name: dri
# mountPath: /dev/dri/renderD128
extraVolumeMounts:
- name: dri
mountPath: /dev/dri/renderD128
# securityContext:
# # Not ideal
# privileged: true
securityContext:
# Not ideal
privileged: true
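
This hunk enables the VAAPI preset and passes /dev/dri/renderD128 into the pod, falling back to privileged: true (flagged "Not ideal" in the comment). A possibly tighter alternative, sketched here as an untested assumption, is to stay unprivileged and join the node's render group that owns the device node (the GID varies per distro; check `getent group render` on the node):

spec:
  securityContext:
    supplementalGroups:
      - 104            # hypothetical GID of the node's "render" group
  containers:
    - name: frigate
      securityContext:
        privileged: false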

View File

@@ -17,7 +17,7 @@ spec:
chart:
spec:
chart: gitea
version: 12.4.0
version: 12.1.2
sourceRef:
kind: HelmRepository
name: gitea-charts
@@ -79,8 +79,8 @@ spec:
ssh:
annotations:
lbipam.cilium.io/sharing-key: gitea
lbipam.cilium.io/sharing-cross-namespace: nginx-ingress
lbipam.cilium.io/ips: 10.44.0.6,2001:470:61a3:400::6
lbipam.cilium.io/sharing-cross-namespace: nginx-ingress-controller
lbipam.cilium.io/ips: 10.44.0.0,2001:470:61a3:400::1
type: LoadBalancer
port: 22
# Required for sharing the IP with another service
@@ -89,7 +89,7 @@ spec:
ingress:
enabled: true
className: nginx-ingress
className: nginx
annotations:
cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true"

View File

@@ -4,6 +4,5 @@ resources:
- namespace.yaml
- volume.yaml
- redis.yaml
- postgres-password.yaml
- postgres-cluster.yaml
- release.yaml
# - release.yaml

View File

@@ -2,7 +2,7 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: immich-db
name: immich-postgres
namespace: immich
spec:
imageName: ghcr.io/tensorchord/cloudnative-vectorchord:14-0.4.3
@@ -12,21 +12,3 @@ spec:
storage:
size: 10Gi
storageClass: mayastor-single-hdd
bootstrap:
initdb:
# Defaults of immich chart
database: immich
owner: immich
# We need to create a custom role because the default one does not allow
# setting up the vectorchord extension
managed:
roles:
- name: immich
createdb: true
login: true
superuser: true
# We need to manually create the secret
# https://github.com/cloudnative-pg/cloudnative-pg/issues/3788
passwordSecret:
name: immich-db-immich
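
The bootstrap/managed block in this hunk pairs a managed role with a hand-created credentials secret, since (per the linked cloudnative-pg issue #3788) the operator does not generate one for managed roles. A minimal sketch of the shape CloudNativePG expects for passwordSecret, with an illustrative placeholder for the password:

apiVersion: v1
kind: Secret
metadata:
  name: immich-db-immich
  namespace: immich
type: kubernetes.io/basic-auth
stringData:
  username: immich
  password: <redacted>   # illustrative placeholder

In this repo that secret was materialized by the VaultStaticSecret of type kubernetes.io/basic-auth that is swapped out near the end of this diff.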

View File

@@ -19,7 +19,7 @@ spec:
chart:
spec:
chart: redis
version: 23.1.3
version: 21.2.13
sourceRef:
kind: HelmRepository
name: bitnami

View File

@@ -2,11 +2,12 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: secustor
name: immich
namespace: immich
spec:
interval: 24h
url: https://secustor.dev/helm-charts
type: "oci"
url: oci://ghcr.io/immich-app/immich-charts
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
@@ -18,52 +19,21 @@ spec:
chart:
spec:
chart: immich
version: 1.0.2
version: 0.9.3
sourceRef:
kind: HelmRepository
name: secustor
name: immich
values:
common:
config:
vectorExtension: vectorchord
postgres:
host: immich-db-rw
existingSecret:
enabled: true
secretName: immich-db-immich
usernameKey: username
passwordKey: password
redis:
host: redis-master
existingSecret:
enabled: true
secretName: redis
passwordKey: redis-password
immich:
persistence:
library:
existingClaim: library
server:
volumeMounts:
- mountPath: /usr/src/app/upload
name: uploads
volumes:
- name: uploads
persistentVolumeClaim:
claimName: library
machineLearning:
redis:
enabled: true
ingress:
enabled: true
className: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
hosts:
- host: immich.lumpiasty.xyz
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- immich.lumpiasty.xyz
secretName: immich-ingress
postgresql:
global:
postgresql:
auth:
existingSecret:

View File

@@ -9,5 +9,5 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 150Gi
storage: 50Gi
storageClassName: mayastor-single-hdd

View File

@@ -4,9 +4,8 @@ resources:
- gitea
- registry
- renovate
- ollama
- librechat
- frigate
- llama
- immich
- nas
- searxng

View File

@@ -2,11 +2,11 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: dynomite567-charts
name: bat-librechat
namespace: librechat
spec:
interval: 24h
url: https://dynomite567.github.io/helm-charts/
url: https://charts.blue-atlas.de
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
@@ -18,10 +18,10 @@ spec:
chart:
spec:
chart: librechat
version: 1.9.1
version: 1.8.10
sourceRef:
kind: HelmRepository
name: dynomite567-charts
name: bat-librechat
values:
global:
librechat:
@@ -39,30 +39,40 @@ spec:
endpoints:
custom:
- name: "Ollama"
apiKey: "ollama"
baseURL: "http://ollama.ollama.svc.cluster.local:11434/v1/chat/completions"
models:
default: [
"llama2",
"mistral",
"codellama",
"dolphin-mixtral",
"mistral-openorca"
]
# fetching the list of models is supported, but the `name` field must start
# with `ollama` (case-insensitive), as it does in this example.
fetch: true
titleConvo: true
titleModel: "current_model"
summarize: false
summaryModel: "current_model"
forcePrompt: false
modelDisplayLabel: "Ollama"
- name: "Llama.cpp"
apiKey: "llama"
baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
baseURL: "http://llama.llama.svc.cluster.local:11434/v1/chat/completions"
models:
default: [
"DeepSeek-R1-0528-Qwen3-8B-GGUF",
"Qwen3-8B-GGUF",
"Qwen3-8B-GGUF-no-thinking",
"gemma3n-e4b",
"gemma3-12b",
"gemma3-12b-q2",
"gemma3-12b-novision",
"gemma3-4b",
"gemma3-4b-novision",
"Qwen3-4B-Thinking-2507",
"Qwen3-4B-Thinking-2507-long-ctx",
"Qwen2.5-VL-7B-Instruct-GGUF",
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L"
"gemma3n"
]
titleConvo: true
titleModel: "gemma3-4b-novision"
titleModel: "current_model"
summarize: false
summaryModel: "gemma3-4b-novision"
summaryModel: "current_model"
forcePrompt: false
modelDisplayLabel: "Llama.cpp"
imageVolume:
@@ -72,12 +82,9 @@ spec:
storageClassName: mayastor-single-hdd
ingress:
enabled: true
className: nginx-ingress
className: nginx
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
hosts:
- host: librechat.lumpiasty.xyz
paths:

View File

@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: caddy
image: caddy:2.10.2-alpine
image: caddy:2.10.0-alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /etc/caddy

View File

@@ -1,33 +1,24 @@
healthCheckTimeout: 600
models:
"DeepSeek-R1-0528-Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF-no-thinking":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--jinja --chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
"gemma3n-e4b":
ttl: 600
"gemma3n":
cmd: |
/app/llama-server
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
@@ -40,177 +31,4 @@ models:
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"gemma3-12b-q2":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen3-4B-Instruct-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Instruct-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-7B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
-ngl 37 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
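
For readers new to the llama-swap config format, an annotated skeleton of one model entry as used throughout this file (field meanings inferred from the entries above; the model name and quant are illustrative):

models:
  "example-model":     # name that clients pass as "model" in OpenAI-style requests
    ttl: 600           # idle seconds before llama-swap stops the backing server
    cmd: |             # command llama-swap launches on the first request
      /app/llama-server
      -hf unsloth/Qwen3-8B-GGUF:Q4_K_M   # fetch this GGUF quant from Hugging Face
      --port ${PORT}                     # ${PORT} is substituted by llama-swap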

View File

@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v166-vulkan-b6795
image: ghcr.io/mostlygeek/llama-swap:v139-vulkan-b5957
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap
@@ -41,7 +41,7 @@ spec:
volumes:
- name: models
persistentVolumeClaim:
claimName: llama-models
claimName: models
- name: kfd
hostPath:
path: /dev/kfd

View File

@@ -8,9 +8,9 @@ metadata:
cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
nginx.org/proxy-read-timeout: 30m
spec:
ingressClassName: nginx-ingress
ingressClassName: nginx
rules:
- host: llama.lumpiasty.xyz
http:

View File

@@ -3,11 +3,11 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: llama
name: llama-models
name: models
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
storageClassName: mayastor-single-ssd
storageClassName: mayastor-single-hdd

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nas-sshd-config
namespace: nas
data:
00-chroot.conf: |
Subsystem sftp internal-sftp
Match User nas
ChrootDirectory /config
ForceCommand internal-sftp -d /data
AllowTcpForwarding no
X11Forwarding no
PermitTunnel no

View File

@@ -1,94 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nas-ssh
namespace: nas
spec:
replicas: 1
selector:
matchLabels:
app: nas-ssh
template:
metadata:
labels:
app: nas-ssh
spec:
securityContext:
fsGroup: 1000
initContainers:
- name: prepare-config
image: alpine:3.20.3
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
set -euo pipefail
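# sshd refuses a ChrootDirectory unless it and every parent directory are
# root-owned and not group- or world-writable, hence the ownership fixes below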
chown root:root /config
chmod 755 /config
mkdir -p /config/data
chown 1000:1000 /config/data
chmod 750 /config/data
mkdir -p /config/ssh_host_keys
chown root:root /config/ssh_host_keys
chmod 700 /config/ssh_host_keys
for key in /config/ssh_host_keys/*; do
[ -f "$key" ] || continue
chown root:root "$key"
chmod 600 "$key"
done
mkdir -p /config/sshd/sshd_config.d
cp /defaults/00-chroot.conf /config/sshd/sshd_config.d/00-chroot.conf
chown root:root /config/sshd/sshd_config.d/00-chroot.conf
chmod 644 /config/sshd/sshd_config.d/00-chroot.conf
volumeMounts:
- name: data
mountPath: /config
- name: sshd-config
mountPath: /defaults/00-chroot.conf
subPath: 00-chroot.conf
readOnly: true
containers:
- name: ssh
image: lscr.io/linuxserver/openssh-server:version-10.0_p1-r9
imagePullPolicy: IfNotPresent
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Etc/UTC
- name: USER_NAME
value: nas
- name: SUDO_ACCESS
value: "false"
- name: PASSWORD_ACCESS
value: "false"
- name: LOG_STDOUT
value: "true"
- name: PUBLIC_KEY
valueFrom:
secretKeyRef:
name: nas-ssh-authorized-keys
key: public_key
ports:
- containerPort: 2222
name: ssh
protocol: TCP
volumeMounts:
- name: data
mountPath: /config
resources:
requests:
cpu: 50m
memory: 128Mi
limits:
memory: 512Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: nas-data
- name: sshd-config
configMap:
name: nas-sshd-config

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: nas

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nas-data
namespace: nas
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
storageClassName: mayastor-single-hdd

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: nas-ssh-authorized-keys
namespace: nas
type: Opaque
stringData:
public_key: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nas-ssh
namespace: nas
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster
ports:
- name: ssh
port: 22
targetPort: 2222
protocol: TCP
selector:
app: nas-ssh

View File

@@ -0,0 +1,68 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama-proxy
namespace: ollama
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: ollama-proxy
template:
metadata:
labels:
app.kubernetes.io/name: ollama-proxy
spec:
containers:
- name: caddy
image: caddy:2.10.0-alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /etc/caddy
name: proxy-config
env:
- name: API_KEY
valueFrom:
secretKeyRef:
name: ollama-api-key
key: API_KEY
volumes:
- name: proxy-config
configMap:
name: ollama-proxy-config
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: ollama
name: ollama-proxy-config
data:
Caddyfile: |
http://ollama.lumpiasty.xyz {
@requireAuth {
not header Authorization "Bearer {env.API_KEY}"
}
respond @requireAuth "Unauthorized" 401
reverse_proxy ollama:11434 {
flush_interval -1
}
}
---
apiVersion: v1
kind: Service
metadata:
namespace: ollama
name: ollama-proxy
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: ollama-proxy
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
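
The Caddyfile matcher answers 401 to any request whose Authorization header is not exactly "Bearer <API_KEY>", and flush_interval -1 disables response buffering so streamed tokens pass through immediately. A hedged smoke test through the Ingress defined below (assuming the key from the ollama-api-key secret; /api/tags is Ollama's model-list endpoint):

curl -H "Authorization: Bearer $API_KEY" https://ollama.lumpiasty.xyz/api/tags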

apps/ollama/ingress.yaml (new file, 28 lines)
View File

@@ -0,0 +1,28 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: ollama
name: ollama
annotations:
cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.org/proxy-read-timeout: 30m
spec:
ingressClassName: nginx
rules:
- host: ollama.lumpiasty.xyz
http:
paths:
- backend:
service:
name: ollama-proxy
port:
number: 80
path: /
pathType: Prefix
tls:
- hosts:
- ollama.lumpiasty.xyz
secretName: ollama-ingress

View File

@@ -2,8 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- release.yaml
- secret.yaml
- configmap.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- auth-proxy.yaml
- ingress.yaml

View File

@@ -2,4 +2,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: searxng
name: ollama

apps/ollama/release.yaml (new file, 60 lines)
View File

@@ -0,0 +1,60 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: ollama-helm
namespace: ollama
spec:
interval: 24h
url: https://otwld.github.io/ollama-helm/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: ollama
namespace: ollama
spec:
interval: 30m
chart:
spec:
chart: ollama
version: 1.24.0
sourceRef:
kind: HelmRepository
name: ollama-helm
namespace: ollama
interval: 12h
values:
ollama:
gpu:
enabled: false
persistentVolume:
enabled: true
storageClass: mayastor-single-hdd
size: 200Gi
# GPU support
# Rewrite of the options from
# https://hub.docker.com/r/grinco/ollama-amd-apu
image:
repository: grinco/ollama-amd-apu
tag: vulkan
securityContext:
# Not ideal
privileged: true
capabilities:
add:
- PERFMON
volumeMounts:
- name: kfd
mountPath: /dev/kfd
- name: dri
mountPath: /dev/dri
volumes:
- name: kfd
hostPath:
path: /dev/kfd
type: CharDevice
- name: dri
hostPath:
path: /dev/dri
type: Directory

View File

@@ -2,37 +2,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: immich-password
namespace: immich
name: ollama-proxy
namespace: ollama
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: immich
namespace: immich
name: ollama
namespace: ollama
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: immich
serviceAccount: immich-password
role: ollama-proxy
serviceAccount: ollama-proxy
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: immich-db
namespace: immich
name: ollama-api-key
namespace: ollama
spec:
type: kv-v2
mount: secret
path: immich-db
path: ollama
destination:
create: true
name: immich-db-immich
type: kubernetes.io/basic-auth
name: ollama-api-key
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: immich
vaultAuthRef: ollama
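
The new VaultAuth binds the ollama-proxy ServiceAccount to the role ollama-proxy on the kubernetes auth mount, and the VaultStaticSecret reads the kv-v2 path ollama under the secret mount. The Vault side presumably needs a matching role and policy; a sketch in the same format as the immich role and policy files removed at the end of this diff (the policy name is an assumption):

bound_service_account_names:
  - ollama-proxy
bound_service_account_namespaces:
  - ollama
token_policies:
  - ollama

path "secret/data/ollama" {
  capabilities = ["read"]
}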

View File

@@ -6,9 +6,9 @@ metadata:
name: registry
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
spec:
ingressClassName: nginx-ingress
ingressClassName: nginx
rules:
- host: registry.lumpiasty.xyz
http:

View File

@@ -15,7 +15,7 @@ spec:
- name: renovate
# Update this to the latest available and then enable Renovate on
# the manifest
image: renovate/renovate:41.169.1-full
image: renovate/renovate:41.43.3-full
envFrom:
- secretRef:
name: renovate-gitea-token

View File

@@ -1 +0,0 @@
use_default_settings: true

View File

@@ -1,42 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng
namespace: searxng
spec:
replicas: 1
selector:
matchLabels:
app: searxng
template:
metadata:
labels:
app: searxng
spec:
containers:
- name: searxng
image: searxng/searxng:2025.8.12-6b1516d
ports:
- containerPort: 8080
env:
- name: SEARXNG_SECRET
valueFrom:
secretKeyRef:
name: searxng-secret
key: SEARXNG_SECRET
optional: false
volumeMounts:
- name: config-volume
mountPath: /etc/searxng/settings.yml
subPath: settings.yml
readOnly: true
- name: searxng-persistent-data
mountPath: /var/cache/searxng
volumes:
- name: config-volume
configMap:
name: searxng-config
- name: searxng-persistent-data
persistentVolumeClaim:
claimName: searxng-persistent-data

View File

@@ -1,25 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: searxng
name: searxng
annotations:
cert-manager.io/cluster-issuer: letsencrypt
spec:
ingressClassName: nginx-ingress
rules:
- host: searxng.lumpiasty.xyz
http:
paths:
- backend:
service:
name: searxng
port:
number: 8080
path: /
pathType: Prefix
tls:
- hosts:
- searxng.lumpiasty.xyz
secretName: searxng-ingress

View File

@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
configMapGenerator:
- name: searxng-config
namespace: searxng
files:
- settings.yml=configs/settings.yml

View File

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: searxng
name: searxng-persistent-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: mayastor-single-ssd

View File

@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: searxng
namespace: searxng
spec:
selector:
app: searxng
ports:
- protocol: TCP
port: 8080
targetPort: 8080
type: ClusterIP

View File

@@ -1,18 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mayastor-single-ssd
parameters:
protocol: nvmf
# Single replica
repl: "1"
# Thin provision volumes
thin: "true"
# Generate a new filesystem UUID when cloning
cloneFsIdAsVolumeId: "true"
# Schedule this StorageClass only on ssd pools
poolAffinityTopologyLabel: |
type: ssd
provisioner: io.openebs.csi-mayastor
# Allow expansion of volumes
allowVolumeExpansion: true
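
poolAffinityTopologyLabel restricts volume placement to Mayastor pools whose topology carries a matching label; the counterpart sits on the DiskPool, as in the anapistula-delrosalae-ssd pool removed later in this diff:

topology:
  labelled:
    type: ssd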

View File

@@ -18,14 +18,14 @@ spec:
chart:
spec:
chart: cert-manager-webhook-ovh
version: 0.8.0
version: 0.7.5
sourceRef:
kind: HelmRepository
name: cert-manager-webhook-ovh
namespace: cert-manager
interval: 12h
values:
configVersion: 0.0.2
configVersion: 0.0.1
groupName: lumpiasty-homelab
certManager:
namespace: cert-manager
@@ -38,7 +38,6 @@ spec:
acmeServerUrl: https://acme-v02.api.letsencrypt.org/directory
email: arek.dzski@gmail.com
ovhEndpointName: ovh-eu
ovhAuthenticationMethod: application
ovhAuthenticationRef:
applicationKeyRef:
name: ovh-credentials
@@ -46,6 +45,6 @@ spec:
applicationSecretRef:
name: ovh-credentials
key: applicationSecret
applicationConsumerKeyRef:
consumerKeyRef:
name: ovh-credentials
key: consumerKey

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cert-manager
version: v1.19.1
version: v1.18.2
sourceRef:
kind: HelmRepository
name: cert-manager

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cilium
version: 1.18.2
version: 1.17.6
sourceRef:
kind: HelmRepository
name: cilium

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cloudnative-pg
version: 0.26.0
version: 0.24.0
sourceRef:
kind: HelmRepository
name: cnpg

View File

@@ -97,7 +97,7 @@ spec:
env:
- name: GOMEMLIMIT
value: 161MiB
image: registry.k8s.io/coredns/coredns:v1.13.0
image: registry.k8s.io/coredns/coredns:v1.12.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: k8up
version: 4.8.6
version: 4.8.4
sourceRef:
kind: HelmRepository
name: k8up-io

View File

@@ -2,32 +2,32 @@
apiVersion: v1
kind: Namespace
metadata:
name: nginx-ingress
name: nginx-ingress-controller
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: ingress-nginx
namespace: nginx-ingress
name: nginx
namespace: nginx-ingress-controller
spec:
interval: 24h
url: https://kubernetes.github.io/ingress-nginx
url: https://helm.nginx.com/stable
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: nginx-ingress
namespace: nginx-ingress
namespace: nginx-ingress-controller
spec:
interval: 30m
chart:
spec:
chart: ingress-nginx
version: 4.13.3
chart: nginx-ingress
version: 2.2.1
sourceRef:
kind: HelmRepository
name: ingress-nginx
namespace: nginx-ingress
name: nginx
namespace: nginx-ingress-controller
interval: 12h
values:
controller:
@@ -39,11 +39,9 @@ spec:
cpu: 100m
memory: 128Mi
ingressClass: "nginx-ingress"
ingressClassResource:
name: "nginx-ingress"
enabled: true
default: false
ingressClass:
create: true
setAsDefaultIngress: true
service:
create: true
@@ -51,11 +49,11 @@ spec:
# Required for sharing the IP with another service
externalTrafficPolicy: Cluster
ipFamilyPolicy: RequireDualStack
ipFamilies:
- IPv4
- IPv6
annotations:
# Share the IP with gitea ssh so we can use the same domain for both ports
lbipam.cilium.io/sharing-key: gitea
lbipam.cilium.io/sharing-cross-namespace: gitea
lbipam.cilium.io/ips: 10.44.0.6,2001:470:61a3:400::6
lbipam.cilium.io/ips: 10.44.0.0,2001:470:61a3:400::1
config:
entries:
proxy-buffering: "false"

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openbao
version: 0.19.0
version: 0.16.2
sourceRef:
kind: HelmRepository
name: openbao

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openebs
version: 4.3.3
version: 4.3.2
sourceRef:
kind: HelmRepository
name: openebs

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: vault-secrets-operator
version: 1.0.1
version: 0.10.0
sourceRef:
kind: HelmRepository
name: hashicorp

View File

@@ -1,11 +0,0 @@
apiVersion: "openebs.io/v1beta3"
kind: DiskPool
metadata:
name: anapistula-delrosalae-ssd
namespace: openebs
spec:
node: anapistula-delrosalae
disks: ["aio:///dev/disk/by-id/nvme-eui.000000000000000000a07501ead1ebdb"]
topology:
labelled:
type: ssd

View File

@@ -3,7 +3,7 @@ kind: Kustomization
resources:
- controllers/k8up-crd-4.8.3.yaml
- controllers/cilium.yaml
- controllers/nginx-ingress.yaml
- controllers/nginx.yaml
- controllers/dns-public.yaml
- controllers/cert-manager.yaml
- controllers/cert-manager-webhook-ovh.yaml
@@ -15,11 +15,9 @@ resources:
- controllers/mongodb-operator.yaml
- controllers/cloudnative-pg.yaml
- diskpools/anapistula-delrosalae-hdd.yaml
- diskpools/anapistula-delrosalae-ssd.yaml
- configs/bgp-cluster-config.yaml
- configs/loadbalancer-ippool.yaml
- configs/single-hdd-sc.yaml
- configs/single-ssd-sc.yaml
- configs/mayastor-snapshotclass.yaml
- configs/openbao-cert.yaml
- configs/ovh-cert-manager-secret.yaml

View File

@@ -10,14 +10,14 @@ machine:
gateway: 2001:470:61a3:100:ffff:ffff:ffff:ffff
- network: 0.0.0.0/0
gateway: 192.168.1.1
mtu: 1280
mtu: 1500
install:
diskSelector:
wwid: t10.ATA SSDPR-CX400-256 GUH039914
# Generated on https://factory.talos.dev/
# amd-ucode and amdgpu
image: factory.talos.dev/metal-installer/9c1d1b442d73f96dcd04e81463eb20000ab014062d22e1b083e1773336bc1dd5:v1.10.6
# intel-ucode and amdgpu
image: factory.talos.dev/installer/06deebb947b815afa53f04c450d355d3c8bc28927a387c754db1622a0a06349e:v1.9.5
extraKernelArgs:
- cpufreq.default_governor=performance
sysfs:
@@ -27,3 +27,4 @@ machine:
devices.system.cpu.cpu6.cpufreq.scaling_max_freq: "550000"
devices.system.cpu.cpu7.cpufreq.scaling_max_freq: "550000"
devices.system.cpu.cpu8.cpufreq.scaling_max_freq: "550000"

View File

@@ -1,6 +0,0 @@
bound_service_account_names:
- immich-password
bound_service_account_namespaces:
- immich
token_policies:
- immich

View File

@@ -1,4 +0,0 @@
path "secret/data/immich-db" {
capabilities = ["read"]
}