133 Commits

Author SHA1 Message Date
223642ca21 Update renovate/renovate Docker tag to v41.169.1 2025-11-03 00:00:29 +00:00
3a57ef6953 add nas deployment 2025-11-03 00:56:36 +01:00
f4a865ce7a update llama-swap docker image 2025-10-19 20:38:39 +02:00
e7b3b220aa Merge pull request 'Update caddy Docker tag to v2.10.2' (#89) from renovate/caddy-2.x into fresh-start
Reviewed-on: #89
2025-10-19 18:32:21 +00:00
0642d29ed5 Merge pull request 'Update Helm release cert-manager to v1.19.1' (#91) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #91
2025-10-19 18:31:37 +00:00
3f044670e0 Merge pull request 'Update renovate/renovate Docker tag to v41.152.7' (#93) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #93
2025-10-19 18:29:15 +00:00
122770b128 Merge pull request 'Update Helm release immich to v1' (#94) from renovate/immich-1.x into fresh-start
Reviewed-on: #94
2025-10-19 18:25:55 +00:00
d894d42129 Merge pull request 'Update Helm release openbao to v0.19.0' (#92) from renovate/openbao-0.x into fresh-start
Reviewed-on: #92
2025-10-19 18:23:55 +00:00
3426b1215d Merge pull request 'Update Helm release vault-secrets-operator to v1' (#95) from renovate/vault-secrets-operator-1.x into fresh-start
Reviewed-on: #95
2025-10-19 18:21:53 +00:00
73a189f4e8 Merge pull request 'Update redis Docker tag to v23' (#96) from renovate/redis-23.x into fresh-start
Reviewed-on: #96
2025-10-19 18:19:55 +00:00
4518cdda22 Update redis Docker tag to v23 2025-10-19 18:18:51 +00:00
3682e4d5bf Update Helm release vault-secrets-operator to v1 2025-10-19 18:18:49 +00:00
3135514f6d Update Helm release immich to v1 2025-10-19 18:18:47 +00:00
5e39cc9082 Update renovate/renovate Docker tag to v41.152.7 2025-10-19 18:18:45 +00:00
6eed078d30 Update Helm release openbao to v0.19.0 2025-10-19 18:18:42 +00:00
0bb805eaaa Update Helm release cert-manager to v1.19.1 2025-10-19 18:18:40 +00:00
c0f9670837 Update caddy Docker tag to v2.10.2 2025-10-19 18:18:35 +00:00
69728501e1 Merge pull request 'Update Helm release immich to v0.9.7' (#77) from renovate/immich-0.x into fresh-start
Reviewed-on: #77
2025-10-19 18:13:37 +00:00
0a516b3798 Merge pull request 'Update Helm release librechat to v1.9.1' (#79) from renovate/librechat-1.x into fresh-start
Reviewed-on: #79
2025-10-19 18:09:08 +00:00
c9bb63b373 Merge pull request 'Update Helm release openebs to v4.3.3' (#82) from renovate/openebs-4.x into fresh-start
Reviewed-on: #82
2025-10-19 18:04:21 +00:00
5b5043755d Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.13.0' (#83) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #83
2025-10-19 18:03:59 +00:00
e0fcaeaad4 Merge pull request 'Update Helm release ingress-nginx to v4.13.3' (#84) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #84
2025-10-19 18:00:46 +00:00
102efd1254 Merge pull request 'Update Helm release k8up to v4.8.6' (#85) from renovate/k8up-4.x into fresh-start
Reviewed-on: #85
2025-10-19 17:59:40 +00:00
5400c69771 Merge pull request 'Update Helm release cilium to v1.18.2' (#86) from renovate/cilium-1.x into fresh-start
Reviewed-on: #86
2025-10-19 17:57:55 +00:00
b6c70c9931 fix cert-manager-webhook-ovh config after update 2025-10-19 19:56:13 +02:00
2710996a19 Merge pull request 'Update Helm release cert-manager-webhook-ovh to v0.8.0' (#87) from renovate/cert-manager-webhook-ovh-0.x into fresh-start
Reviewed-on: #87
2025-10-19 17:52:55 +00:00
32f8ccfeb8 update values to current values schema 2025-10-19 19:49:54 +02:00
12aab2bf0e Merge pull request 'Update Helm release gitea to v12.4.0' (#88) from renovate/gitea-12.x into fresh-start
Reviewed-on: #88
2025-10-19 17:42:42 +00:00
957b6dab43 Update registry.k8s.io/coredns/coredns Docker tag to v1.13.0 2025-10-17 00:00:35 +00:00
d1b30c7e61 Update Helm release librechat to v1.9.1 2025-10-07 00:02:23 +00:00
d880c342a5 Update Helm release gitea to v12.4.0 2025-10-07 00:01:47 +00:00
ae38951164 Update Helm release k8up to v4.8.6 2025-10-04 00:00:49 +00:00
1e363acfca Update Helm release immich to v0.9.7 2025-10-01 00:00:41 +00:00
c78ca0933c Update Helm release ingress-nginx to v4.13.3 2025-10-01 00:00:30 +00:00
9a31c6bf15 Update Helm release cert-manager-webhook-ovh to v0.8.0 2025-09-25 00:00:28 +00:00
45aa92fe10 Update Helm release cilium to v1.18.2 2025-09-25 00:00:22 +00:00
708ffe203c Add Qwen2.5-VL models 2025-09-13 02:42:21 +02:00
e2c75d2f22 Update Helm release openebs to v4.3.3 2025-08-29 00:00:57 +00:00
3ceec2f10c Merge pull request 'Update renovate/renovate Docker tag to v41.82.10' (#66) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #66
2025-08-25 00:33:25 +00:00
95cfbfbe66 Update renovate/renovate Docker tag to v41.82.10 2025-08-25 00:32:46 +00:00
bf9aefb44a remove ollama 2025-08-25 02:30:47 +02:00
5ffb171821 Merge pull request 'Update Helm release gitea to v12.2.0' (#67) from renovate/gitea-12.x into fresh-start
Reviewed-on: #67
2025-08-25 00:23:50 +00:00
a35116aa31 Merge pull request 'Update redis Docker tag to v22' (#70) from renovate/redis-22.x into fresh-start
Reviewed-on: #70
2025-08-25 00:23:19 +00:00
b32337a2ba Merge pull request 'Update Helm release ingress-nginx to v4.13.1' (#71) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #71
2025-08-25 00:22:58 +00:00
d27b43715c Merge pull request 'Update Helm release immich to v0.7.5' (#73) from renovate/immich-0.x into fresh-start
Reviewed-on: #73
2025-08-25 00:22:24 +00:00
4b0ce7a2e3 Merge pull request 'Update Helm release openbao to v0.16.3' (#75) from renovate/openbao-0.x into fresh-start
Reviewed-on: #75
2025-08-25 00:22:18 +00:00
7f2ef7270c Merge pull request 'Update Helm release cloudnative-pg to v0.26.0' (#72) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #72
2025-08-25 00:18:53 +00:00
73a9b275a7 Merge pull request 'Update Helm release cilium to v1.18.1' (#74) from renovate/cilium-1.x into fresh-start
Reviewed-on: #74
2025-08-25 00:17:27 +00:00
8a61a936c6 Update redis Docker tag to v22 2025-08-24 00:00:34 +00:00
1c2f77927f Update Helm release immich to v0.7.5 2025-08-23 00:00:29 +00:00
4f5b25d910 increase frigate config volume to 5Gi 2025-08-22 16:59:46 +02:00
7c5fafd54e Update Helm release openbao to v0.16.3 2025-08-22 00:00:30 +00:00
de11ec0d1b Update Helm release gitea to v12.2.0 2025-08-20 00:00:40 +00:00
07c32643e7 add searxng 2025-08-18 03:26:54 +02:00
9c61d47fda add qwen3-4b-2507 model 2025-08-18 02:50:46 +02:00
0f24f1dd7b Update Helm release cilium to v1.18.1 2025-08-16 00:00:28 +00:00
83e5cada3f decreate mtu on anapistuala delrosalae to 1280, hack 2025-08-15 20:56:12 +02:00
ccf6302924 Update Helm release cloudnative-pg to v0.26.0 2025-08-14 00:00:36 +00:00
5eb0362788 Update Helm release ingress-nginx to v4.13.1 2025-08-13 00:00:40 +00:00
0985832c2d disable gpu accel in frigate 2025-08-11 20:24:32 +02:00
db86abff25 remove old nginx ingress controller 2025-08-03 19:14:11 +02:00
a1b40a6a21 Revert "add cameras vlan"
This reverts commit 9269f21692.
2025-08-03 18:42:17 +02:00
444c4faf96 move all ingresses to new nginx ingress 2025-08-03 18:17:37 +02:00
9f304af879 update gitea to new ingress 2025-08-03 17:59:54 +02:00
c0524510b8 add nginx-ingress 2025-08-03 17:40:25 +02:00
a26a351396 update llama-swap 2025-08-03 17:16:25 +02:00
9269f21692 add cameras vlan 2025-08-03 16:39:38 +02:00
9d6a9ff304 Merge pull request 'Update Helm release immich to v0.7.2' (#65) from renovate/immich-0.x into fresh-start
Reviewed-on: #65
2025-08-03 14:00:33 +00:00
3cd094007e Merge pull request 'Update renovate/renovate Docker tag to v41.51.0' (#61) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #61
2025-08-03 14:00:19 +00:00
94a57daaf8 Merge pull request 'Update Helm release cilium to v1.18.0' (#62) from renovate/cilium-1.x into fresh-start
Reviewed-on: #62
2025-08-03 14:00:00 +00:00
6fec8d29a6 Update renovate/renovate Docker tag to v41.51.0 2025-08-03 00:00:50 +00:00
3a94da6021 Update Helm release immich to v0.7.2 2025-08-03 00:00:45 +00:00
70511ff9bc Merge pull request 'Update Helm release ollama to v1.25.0' (#63) from renovate/ollama-1.x into fresh-start
Reviewed-on: #63
2025-08-02 14:29:07 +00:00
e8b37d90d8 Merge pull request 'Update Helm release immich to v0.7.1' (#64) from renovate/immich-0.x into fresh-start
Reviewed-on: #64
2025-08-02 14:28:59 +00:00
30b7a78360 Update Helm release immich to v0.7.1 2025-08-02 00:01:07 +00:00
2561b354d1 Update Helm release ollama to v1.25.0 2025-07-30 00:00:32 +00:00
949d8b11db Update Helm release cilium to v1.18.0 2025-07-30 00:00:29 +00:00
6c46b20dba fix nginx disconnecting too fast 2025-07-29 19:49:15 +02:00
f0f9cb4d34 fix api endpoint in librechat 2025-07-29 18:54:07 +02:00
8386e21722 fix image upload in librechat 2025-07-29 18:50:13 +02:00
c871dae045 change chart source and update librechat 2025-07-29 18:36:19 +02:00
70e4967497 increase immich uploads volume 2025-07-29 04:16:28 +02:00
8e68c45573 allow websockets to immich 2025-07-29 03:25:43 +02:00
c4628523bc llama automatic unloading and longer start timeout 2025-07-29 02:31:39 +02:00
071e87ee44 disable warmups 2025-07-29 02:24:14 +02:00
9e17aadb56 add gemma3 model 2025-07-29 02:22:52 +02:00
3ca4ddc233 use immich chart provided ingress 2025-07-29 00:50:44 +02:00
215a2ac1fb Merge pull request 'Update Helm release cloudnative-pg to v0.25.0' (#59) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #59
2025-07-28 22:46:07 +00:00
5b8a861daa Merge pull request 'Update renovate/renovate Docker tag to v41.43.5' (#58) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #58
2025-07-28 22:45:53 +00:00
319e3bafbe Merge pull request 'Update Helm release immich to v0.7.0' (#60) from renovate/immich-0.x into fresh-start
Reviewed-on: #60
2025-07-28 22:45:29 +00:00
ad1c60a049 Update Helm release immich to v0.7.0 2025-07-28 00:00:34 +00:00
41020f8c79 install immich 2025-07-27 22:38:45 +02:00
60c7dd4bdc Update renovate/renovate Docker tag to v41.43.5 2025-07-27 00:00:38 +00:00
0fde3108d6 move llama models to ssd 2025-07-26 17:54:23 +02:00
a299c2cc2b add ssd 2025-07-26 17:52:34 +02:00
a4ea45a39c Update Helm release cloudnative-pg to v0.25.0 2025-07-26 00:03:30 +00:00
30bae60308 fix immich postgres cluster 2025-07-25 23:09:58 +02:00
2f3b7af0da redis for immich 2025-07-25 22:43:21 +02:00
30efd5ae6e Merge pull request 'Update renovate/renovate Docker tag to v41.43.2' (#57) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #57
2025-07-25 20:15:37 +00:00
0e1279473f Update renovate/renovate Docker tag to v41.43.2 2025-07-25 00:00:45 +00:00
718a0d7e33 add immich 2025-07-24 02:50:34 +02:00
9765f1cf86 add gemma3n 2025-07-23 23:46:44 +02:00
5f3a00b382 add qwen3 no thinking 2025-07-23 22:56:52 +02:00
b379c181f2 increase context size 2025-07-23 22:06:45 +02:00
e1801347f2 add qwen3 2025-07-23 20:15:37 +02:00
d53db88fd2 gpu offload in llama.cpp 2025-07-23 19:55:48 +02:00
5fb2bcfc7e add llama.cpp to librechat 2025-07-23 19:19:43 +02:00
f5da3b52a2 Merge pull request 'Update Helm release ollama to v1.24.0' (#53) from renovate/ollama-1.x into fresh-start
Reviewed-on: #53
2025-07-23 17:13:28 +00:00
c3dbb0a608 Merge pull request 'Update Helm release openbao to v0.16.2' (#52) from renovate/openbao-0.x into fresh-start
Reviewed-on: #52
2025-07-23 17:13:09 +00:00
a520c62277 Merge pull request 'Update renovate/renovate Docker tag to v41.42.9' (#51) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #51
2025-07-23 17:12:49 +00:00
6cf45eda17 Merge pull request 'Update Helm release cilium to v1.17.6' (#55) from renovate/cilium-1.x into fresh-start
Reviewed-on: #55
2025-07-23 17:12:35 +00:00
753d43b643 Merge pull request 'Update Helm release nginx-ingress to v2.2.1' (#54) from renovate/nginx-ingress-2.x into fresh-start
Reviewed-on: #54
2025-07-23 17:12:11 +00:00
263b60018d Merge pull request 'Update Helm release gitea to v12.1.2' (#56) from renovate/gitea-12.x into fresh-start
Reviewed-on: #56
2025-07-23 17:10:14 +00:00
0816b6e434 Update renovate/renovate Docker tag to v41.42.9 2025-07-23 00:01:56 +00:00
18eb912f03 llama-swap 2025-07-23 00:18:45 +02:00
a2c23c5f97 Update Helm release gitea to v12.1.2 2025-07-20 00:00:54 +00:00
15ce411c3e Update Helm release nginx-ingress to v2.2.1 2025-07-18 00:00:57 +00:00
04a8c98d63 Update Helm release cilium to v1.17.6 2025-07-17 00:00:45 +00:00
f46219f87e Update Helm release ollama to v1.24.0 2025-07-13 00:00:52 +00:00
53154eeed7 adjust motion masks 2025-07-10 22:06:58 +02:00
2ad310c550 Update Helm release openbao to v0.16.2 2025-07-10 00:00:37 +00:00
d32d94eb00 introduce person mask 2025-07-07 00:02:09 +02:00
5b62f7e386 Merge pull request 'Update renovate/renovate Docker tag to v41.23.1' (#48) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #48
2025-07-06 18:40:21 +00:00
52124193e2 Merge pull request 'Update Helm release ollama to v1.23.0' (#49) from renovate/ollama-1.x into fresh-start
Reviewed-on: #49
2025-07-06 18:40:12 +00:00
0f8ee9e53d Merge pull request 'Update Helm release cert-manager to v1.18.2' (#50) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #50
2025-07-06 18:40:05 +00:00
122c70d0a8 fix config validation error 2025-07-06 20:06:30 +02:00
5463d76771 run renovate once daily 2025-07-06 20:03:42 +02:00
60f2056806 update nix flake 2025-07-06 19:48:03 +02:00
6119ac7271 Update renovate/renovate Docker tag to v41.23.1 2025-07-06 17:00:32 +00:00
1a01f82e30 tune detection objects and retention 2025-07-06 18:58:29 +02:00
74c9ddad62 add motion mask on cameras 2025-07-06 18:15:41 +02:00
caf62609d3 Update Helm release ollama to v1.23.0 2025-07-05 05:00:41 +00:00
d5622416de Update Helm release cert-manager to v1.18.2 2025-07-02 14:00:59 +00:00
57 changed files with 1120 additions and 185 deletions

View File

@@ -3,7 +3,17 @@ install-router:
gen-talos-config: gen-talos-config:
mkdir -p talos/generated mkdir -p talos/generated
talosctl gen config --with-secrets secrets.yaml --config-patch @talos/patches/controlplane.patch --config-patch @talos/patches/openebs.patch --config-patch @talos/patches/openbao.patch --config-patch @talos/patches/ollama.patch --config-patch @talos/patches/frigate.patch --config-patch @talos/patches/anapistula-delrosalae.patch --output-types controlplane -o talos/generated/anapistula-delrosalae.yaml homelab https://kube-api.homelab.lumpiasty.xyz:6443 talosctl gen config \
--with-secrets secrets.yaml \
--config-patch @talos/patches/controlplane.patch \
--config-patch @talos/patches/openebs.patch \
--config-patch @talos/patches/openbao.patch \
--config-patch @talos/patches/ollama.patch \
--config-patch @talos/patches/llama.patch \
--config-patch @talos/patches/frigate.patch \
--config-patch @talos/patches/anapistula-delrosalae.patch \
--output-types controlplane -o talos/generated/anapistula-delrosalae.yaml \
homelab https://kube-api.homelab.lumpiasty.xyz:6443
talosctl gen config --with-secrets secrets.yaml --config-patch @talos/patches/controlplane.patch --output-types worker -o talos/generated/worker.yaml homelab https://kube-api.homelab.lumpiasty.xyz:6443 talosctl gen config --with-secrets secrets.yaml --config-patch @talos/patches/controlplane.patch --output-types worker -o talos/generated/worker.yaml homelab https://kube-api.homelab.lumpiasty.xyz:6443
talosctl gen config --with-secrets secrets.yaml --output-types talosconfig -o talos/generated/talosconfig homelab https://kube-api.homelab.lumpiasty.xyz:6443 talosctl gen config --with-secrets secrets.yaml --output-types talosconfig -o talos/generated/talosconfig homelab https://kube-api.homelab.lumpiasty.xyz:6443
talosctl config endpoint kube-api.homelab.lumpiasty.xyz talosctl config endpoint kube-api.homelab.lumpiasty.xyz

View File

@@ -41,6 +41,33 @@ spec:
days: 90 days: 90
mode: motion mode: motion
objects:
track:
- person
- bicycle
- car
- motorcycle
- cat
- dog
- horse
- sheep
- cow
- bear
review:
alerts:
labels:
- person
- bicycle
- car
- motorcycle
- cat
- dog
- horse
- sheep
- cow
- bear
cameras: cameras:
dom: dom:
enabled: True enabled: True
@@ -53,6 +80,10 @@ spec:
- record - record
output_args: output_args:
record: preset-record-generic-audio-copy record: preset-record-generic-audio-copy
motion:
mask:
# Sasiad
- 0.436,0,0.421,0.072,0.424,0.124,0.304,0.242,0.295,0.194,0.035,0.497,0.035,0.6,0,0.664,0,0
garaz: garaz:
enabled: True enabled: True
ffmpeg: ffmpeg:
@@ -64,9 +95,20 @@ spec:
- record - record
output_args: output_args:
record: preset-record-generic-audio-copy record: preset-record-generic-audio-copy
motion:
mask:
# Sasiad
- 0.662,0.212,0.569,0.2,0.566,0.149,0.549,0.119,0.532,0.169,0.495,0.14,0.491,0,0.881,0,1,0.154,1,0.221,0.986,0.296,0.94,0.28,0.944,0.178,0.664,0.126
# Drzewo
- 0.087,0.032,0,0.174,0,0.508,0.139,0.226,0.12,0.108
objects:
filters:
person:
# Uparty false positive
mask: 0.739,0.725,0.856,0.76,0.862,0.659,0.746,0.614
ffmpeg: # ffmpeg:
hwaccel_args: preset-vaapi # hwaccel_args: preset-vaapi
detectors: detectors:
ov_0: ov_0:
@@ -97,7 +139,7 @@ spec:
skipuninstall: true skipuninstall: true
config: config:
enabled: true enabled: true
size: 1Gi size: 5Gi
storageClass: mayastor-single-hdd storageClass: mayastor-single-hdd
skipuninstall: true skipuninstall: true
envFromSecrets: envFromSecrets:
@@ -105,9 +147,9 @@ spec:
ingress: ingress:
enabled: true enabled: true
ingressClassName: nginx-ingress
annotations: annotations:
cert-manager.io/cluster-issuer: letsencrypt cert-manager.io/cluster-issuer: letsencrypt
nginx.org/websocket-services: frigate
hosts: hosts:
- host: frigate.lumpiasty.xyz - host: frigate.lumpiasty.xyz
paths: paths:
@@ -122,16 +164,16 @@ spec:
kubernetes.io/hostname: anapistula-delrosalae kubernetes.io/hostname: anapistula-delrosalae
# GPU access # GPU access
extraVolumes: # extraVolumes:
- name: dri # - name: dri
hostPath: # hostPath:
path: /dev/dri/renderD128 # path: /dev/dri/renderD128
type: CharDevice # type: CharDevice
extraVolumeMounts: # extraVolumeMounts:
- name: dri # - name: dri
mountPath: /dev/dri/renderD128 # mountPath: /dev/dri/renderD128
securityContext: # securityContext:
# Not ideal # # Not ideal
privileged: true # privileged: true

View File

@@ -17,7 +17,7 @@ spec:
chart: chart:
spec: spec:
chart: gitea chart: gitea
version: 12.1.1 version: 12.4.0
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: gitea-charts name: gitea-charts
@@ -79,8 +79,8 @@ spec:
ssh: ssh:
annotations: annotations:
lbipam.cilium.io/sharing-key: gitea lbipam.cilium.io/sharing-key: gitea
lbipam.cilium.io/sharing-cross-namespace: nginx-ingress-controller lbipam.cilium.io/sharing-cross-namespace: nginx-ingress
lbipam.cilium.io/ips: 10.44.0.0,2001:470:61a3:400::1 lbipam.cilium.io/ips: 10.44.0.6,2001:470:61a3:400::6
type: LoadBalancer type: LoadBalancer
port: 22 port: 22
# Requirement for sharing ip with other service # Requirement for sharing ip with other service
@@ -89,7 +89,7 @@ spec:
ingress: ingress:
enabled: true enabled: true
className: nginx className: nginx-ingress
annotations: annotations:
cert-manager.io/cluster-issuer: letsencrypt cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true" acme.cert-manager.io/http01-edit-in-place: "true"

View File

@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- volume.yaml
- redis.yaml
- postgres-password.yaml
- postgres-cluster.yaml
- release.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: immich

View File

@@ -0,0 +1,32 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: immich-db
namespace: immich
spec:
imageName: ghcr.io/tensorchord/cloudnative-vectorchord:14-0.4.3
instances: 1
storage:
size: 10Gi
storageClass: mayastor-single-hdd
bootstrap:
initdb:
# Defaults of immich chart
database: immich
owner: immich
# We need to create custom role because default one does not allow to set up
# vectorchord extension
managed:
roles:
- name: immich
createdb: true
login: true
superuser: true
# We need to manually create secret
# https://github.com/cloudnative-pg/cloudnative-pg/issues/3788
passwordSecret:
name: immich-db-immich

View File

@@ -0,0 +1,38 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: immich-password
namespace: immich
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: immich
namespace: immich
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: immich
serviceAccount: immich-password
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: immich-db
namespace: immich
spec:
type: kv-v2
mount: secret
path: immich-db
destination:
create: true
name: immich-db-immich
type: kubernetes.io/basic-auth
transformation:
excludeRaw: true
vaultAuthRef: immich

29
apps/immich/redis.yaml Normal file
View File

@@ -0,0 +1,29 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: bitnami
namespace: immich
spec:
interval: 24h
type: "oci"
url: oci://registry-1.docker.io/bitnamicharts/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: redis
namespace: immich
spec:
interval: 30m
chart:
spec:
chart: redis
version: 23.1.3
sourceRef:
kind: HelmRepository
name: bitnami
values:
global:
defaultStorageClass: mayastor-single-hdd
architecture: standalone

69
apps/immich/release.yaml Normal file
View File

@@ -0,0 +1,69 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: secustor
namespace: immich
spec:
interval: 24h
url: https://secustor.dev/helm-charts
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: immich
namespace: immich
spec:
interval: 30m
chart:
spec:
chart: immich
version: 1.0.2
sourceRef:
kind: HelmRepository
name: secustor
values:
common:
config:
vecotrExtension: vectorchord
postgres:
host: immich-db-rw
existingSecret:
enabled: true
secretName: immich-db-immich
usernameKey: username
passwordKey: password
redis:
host: redis-master
existingSecret:
enabled: true
secretName: redis
passwordKey: redis-password
server:
volumeMounts:
- mountPath: /usr/src/app/upload
name: uploads
volumes:
- name: uploads
persistentVolumeClaim:
claimName: library
machineLearning:
enabled: true
ingress:
enabled: true
className: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
hosts:
- host: immich.lumpiasty.xyz
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- immich.lumpiasty.xyz
secretName: immich-ingress

13
apps/immich/volume.yaml Normal file
View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: library
namespace: immich
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 150Gi
storageClassName: mayastor-single-hdd

View File

@@ -4,6 +4,9 @@ resources:
- gitea - gitea
- registry - registry
- renovate - renovate
- ollama
- librechat - librechat
- frigate - frigate
- llama
- immich
- nas
- searxng

View File

@@ -2,11 +2,11 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository kind: HelmRepository
metadata: metadata:
name: bat-librechat name: dynomite567-charts
namespace: librechat namespace: librechat
spec: spec:
interval: 24h interval: 24h
url: https://charts.blue-atlas.de url: https://dynomite567.github.io/helm-charts/
--- ---
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
@@ -18,10 +18,10 @@ spec:
chart: chart:
spec: spec:
chart: librechat chart: librechat
version: 1.8.10 version: 1.9.1
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: bat-librechat name: dynomite567-charts
values: values:
global: global:
librechat: librechat:
@@ -39,26 +39,32 @@ spec:
endpoints: endpoints:
custom: custom:
- name: "Ollama" - name: "Llama.cpp"
apiKey: "ollama" apiKey: "llama"
baseURL: "http://ollama.ollama.svc.cluster.local:11434/v1/chat/completions" baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
models: models:
default: [ default: [
"llama2", "DeepSeek-R1-0528-Qwen3-8B-GGUF",
"mistral", "Qwen3-8B-GGUF",
"codellama", "Qwen3-8B-GGUF-no-thinking",
"dolphin-mixtral", "gemma3n-e4b",
"mistral-openorca" "gemma3-12b",
] "gemma3-12b-q2",
# fetching list of models is supported but the `name` field must start "gemma3-12b-novision",
# with `ollama` (case-insensitive), as it does in this example. "gemma3-4b",
fetch: true "gemma3-4b-novision",
"Qwen3-4B-Thinking-2507",
"Qwen3-4B-Thinking-2507-long-ctx",
"Qwen2.5-VL-7B-Instruct-GGUF",
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L"
]
titleConvo: true titleConvo: true
titleModel: "current_model" titleModel: "gemma3-4b-novision"
summarize: false summarize: false
summaryModel: "current_model" summaryModel: "gemma3-4b-novision"
forcePrompt: false forcePrompt: false
modelDisplayLabel: "Ollama" modelDisplayLabel: "Llama.cpp"
imageVolume: imageVolume:
enabled: true enabled: true
size: 10G size: 10G
@@ -66,9 +72,12 @@ spec:
storageClassName: mayastor-single-hdd storageClassName: mayastor-single-hdd
ingress: ingress:
enabled: true enabled: true
className: nginx className: nginx-ingress
annotations: annotations:
cert-manager.io/cluster-issuer: letsencrypt cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
hosts: hosts:
- host: librechat.lumpiasty.xyz - host: librechat.lumpiasty.xyz
paths: paths:

View File

@@ -2,21 +2,21 @@
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: ollama-proxy name: llama-proxy
namespace: ollama namespace: llama
spec: spec:
replicas: 1 replicas: 1
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: ollama-proxy app.kubernetes.io/name: llama-proxy
template: template:
metadata: metadata:
labels: labels:
app.kubernetes.io/name: ollama-proxy app.kubernetes.io/name: llama-proxy
spec: spec:
containers: containers:
- name: caddy - name: caddy
image: caddy:2.10.0-alpine image: caddy:2.10.2-alpine
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
volumeMounts: volumeMounts:
- mountPath: /etc/caddy - mountPath: /etc/caddy
@@ -25,21 +25,21 @@ spec:
- name: API_KEY - name: API_KEY
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: ollama-api-key name: llama-api-key
key: API_KEY key: API_KEY
volumes: volumes:
- name: proxy-config - name: proxy-config
configMap: configMap:
name: ollama-proxy-config name: llama-proxy-config
--- ---
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
namespace: ollama namespace: llama
name: ollama-proxy-config name: llama-proxy-config
data: data:
Caddyfile: | Caddyfile: |
http://ollama.lumpiasty.xyz { http://llama.lumpiasty.xyz {
@requireAuth { @requireAuth {
not header Authorization "Bearer {env.API_KEY}" not header Authorization "Bearer {env.API_KEY}"
@@ -47,7 +47,7 @@ data:
respond @requireAuth "Unauthorized" 401 respond @requireAuth "Unauthorized" 401
reverse_proxy ollama:11434 { reverse_proxy llama:11434 {
flush_interval -1 flush_interval -1
} }
} }
@@ -55,12 +55,12 @@ data:
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
namespace: ollama namespace: llama
name: ollama-proxy name: llama-proxy
spec: spec:
type: ClusterIP type: ClusterIP
selector: selector:
app.kubernetes.io/name: ollama-proxy app.kubernetes.io/name: llama-proxy
ports: ports:
- name: http - name: http
port: 80 port: 80

View File

@@ -0,0 +1,216 @@
healthCheckTimeout: 600
models:
"DeepSeek-R1-0528-Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF-no-thinking":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--jinja --chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
"gemma3n-e4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"gemma3-12b-q2":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen3-4B-Instruct-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Instruct-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-7B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
-ngl 37 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}

View File

@@ -0,0 +1,101 @@
{%- if not add_generation_prompt is defined %}
{%- set add_generation_prompt = false %}
{%- endif %}
{%- set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true, is_last_user=false) %}
{#- Fold every system message into one system prompt, separated by blank lines. #}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{%- if ns.is_first_sp %}
{%- set ns.system_prompt = ns.system_prompt + message['content'] %}
{%- set ns.is_first_sp = false %}
{%- else %}
{%- set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}
{%- endif %}
{%- endif %}
{%- endfor %}
{#- Adapted from https://github.com/sgl-project/sglang/blob/main/examples/chat_template/tool_chat_template_deepseekr1.jinja #}
{#- Fix: '\n\n' inserted before '## Tools' — previously the markdown heading was glued to the preceding sentence ("…valid.## Tools"). #}
{%- if tools is defined and tools is not none %}
{%- set tool_ns = namespace(text='You are a helpful assistant with tool calling capabilities. ' + 'When a tool call is needed, you MUST use the following format to issue the call:\n' + '<tool▁calls▁begin><tool▁call▁begin>function<tool▁sep>FUNCTION_NAME\n' + '```json\n{"param1": "value1", "param2": "value2"}\n```<tool▁call▁end><tool▁calls▁end>\n\n' + 'Make sure the JSON is valid.' + '\n\n## Tools\n\n### Function\n\nYou have the following functions available:\n\n') %}
{%- for tool in tools %}
{%- set tool_ns.text = tool_ns.text + '\n```json\n' + (tool | tojson) + '\n```\n' %}
{%- endfor %}
{%- if ns.system_prompt|length != 0 %}
{%- set ns.system_prompt = ns.system_prompt + '\n\n' + tool_ns.text %}
{%- else %}
{%- set ns.system_prompt = tool_ns.text %}
{%- endif %}
{%- endif %}
{{- bos_token }}
{#- '/no_think' is prepended unconditionally: this template forces non-thinking mode. #}
{{- '/no_think' + ns.system_prompt }}
{%- set last_index = (messages|length - 1) %}
{#- Render the conversation turn by turn. Fix: removed a stray bare '}' line
    that followed the first tool-call output statement and would have been
    emitted verbatim into the prompt. #}
{%- for message in messages %}
{%- set content = message['content'] %}
{%- if message['role'] == 'user' %}
{%- set ns.is_tool = false -%}
{%- set ns.is_first = false -%}
{%- set ns.is_last_user = true -%}
{%- if loop.index0 == last_index %}
{{- '<User>' + content }}
{%- else %}
{{- '<User>' + content + '<Assistant>'}}
{%- endif %}
{%- endif %}
{#- Strip any reasoning trace from prior assistant turns: keep only text after '</think>'. #}
{%- if message['role'] == 'assistant' %}
{%- if '</think>' in content %}
{%- set content = (content.split('</think>')|last) %}
{%- endif %}
{%- endif %}
{%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}
{%- set ns.is_last_user = false -%}
{%- if ns.is_tool %}
{{- '<tool▁outputs▁end>'}}
{%- endif %}
{%- set ns.is_first = false %}
{%- set ns.is_tool = false -%}
{%- set ns.is_output_first = true %}
{%- for tool in message['tool_calls'] %}
{%- set arguments = tool['function']['arguments'] %}
{%- if arguments is not string %}
{%- set arguments = arguments|tojson %}
{%- endif %}
{%- if not ns.is_first %}
{%- if content is none %}
{{- '<tool▁calls▁begin><tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + arguments + '\n' + '```' + '<tool▁call▁end>'}}
{%- else %}
{{- content + '<tool▁calls▁begin><tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + arguments + '\n' + '```' + '<tool▁call▁end>'}}
{%- endif %}
{%- set ns.is_first = true -%}
{%- else %}
{{- '\n' + '<tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + arguments + '\n' + '```' + '<tool▁call▁end>'}}
{%- endif %}
{%- endfor %}
{{- '<tool▁calls▁end><end▁of▁sentence>'}}
{%- endif %}
{%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none) %}
{%- set ns.is_last_user = false -%}
{%- if ns.is_tool %}
{{- '<tool▁outputs▁end>' + content + '<end▁of▁sentence>'}}
{%- set ns.is_tool = false -%}
{%- else %}
{{- content + '<end▁of▕sentence>'}}
{%- endif %}
{%- endif %}
{%- if message['role'] == 'tool' %}
{%- set ns.is_last_user = false -%}
{%- set ns.is_tool = true -%}
{%- if ns.is_output_first %}
{{- '<tool▁outputs▁begin><tool▁output▁begin>' + content + '<tool▁output▁end>'}}
{%- set ns.is_output_first = false %}
{%- else %}
{{- '\n<tool▁output▁begin>' + content + '<tool▁output▁end>'}}
{%- endif %}
{%- endif %}
{%- endfor -%}
{%- if ns.is_tool %}
{{- '<tool▁outputs▁end>'}}
{%- endif %}
{#- if add_generation_prompt and not ns.is_last_user and not ns.is_tool #}
{%- if add_generation_prompt and not ns.is_tool %}
{{- '<Assistant>'}}
{%- endif %}

View File

@@ -0,0 +1,70 @@
---
# llama-swap: single OpenAI-compatible endpoint that loads/unloads llama.cpp
# models on demand according to the mounted config.yaml.
apiVersion: apps/v1
kind: Deployment
metadata:
name: llama-swap
namespace: llama
spec:
# One replica: models share one GPU and one model cache.
replicas: 1
selector:
matchLabels:
app: llama-swap
template:
metadata:
labels:
app: llama-swap
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v166-vulkan-b6795
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap
args:
- --config=/config/config.yaml
# Reload model definitions when the mounted ConfigMap changes.
- --watch-config
ports:
- containerPort: 8080
name: http
protocol: TCP
volumeMounts:
# HF download cache — persisted so model weights survive pod restarts.
- name: models
mountPath: /root/.cache
# AMD GPU device nodes needed by the Vulkan build.
- mountPath: /dev/kfd
name: kfd
- mountPath: /dev/dri
name: dri
- mountPath: /config
name: config
securityContext:
# Required for raw access to the hostPath-mounted /dev/kfd and /dev/dri;
# the llama namespace is exempted from Pod Security admission for this.
privileged: true
volumes:
- name: models
persistentVolumeClaim:
claimName: llama-models
- name: kfd
hostPath:
path: /dev/kfd
type: CharDevice
- name: dri
hostPath:
path: /dev/dri
type: Directory
---
apiVersion: v1
kind: Service
metadata:
name: llama
namespace: llama
spec:
type: ClusterIP
ports:
- name: http
# Exposed on 11434 — Ollama's default port — so existing clients keep working.
port: 11434
targetPort: 8080
protocol: TCP
selector:
app: llama-swap

View File

@@ -2,27 +2,27 @@
apiVersion: networking.k8s.io/v1 apiVersion: networking.k8s.io/v1
kind: Ingress kind: Ingress
metadata: metadata:
namespace: ollama namespace: llama
name: ollama name: llama
annotations: annotations:
cert-manager.io/cluster-issuer: letsencrypt cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true" acme.cert-manager.io/http01-edit-in-place: "true"
nginx.ingress.kubernetes.io/proxy-buffering: "false" nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.org/proxy-read-timeout: 30m nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
spec: spec:
ingressClassName: nginx ingressClassName: nginx-ingress
rules: rules:
- host: ollama.lumpiasty.xyz - host: llama.lumpiasty.xyz
http: http:
paths: paths:
- backend: - backend:
service: service:
name: ollama-proxy name: llama-proxy
port: port:
number: 80 number: 80
path: / path: /
pathType: Prefix pathType: Prefix
tls: tls:
- hosts: - hosts:
- ollama.lumpiasty.xyz - llama.lumpiasty.xyz
secretName: ollama-ingress secretName: llama-ingress

View File

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- auth-proxy.yaml
- ingress.yaml
- pvc.yaml
- deployment.yaml
configMapGenerator:
- name: llama-swap
namespace: llama
files:
- config.yaml=configs/config.yaml
- qwen_nothink_chat_template.jinja=configs/qwen_nothink_chat_template.jinja

View File

@@ -2,4 +2,4 @@
apiVersion: v1 apiVersion: v1
kind: Namespace kind: Namespace
metadata: metadata:
name: ollama name: llama

13
apps/llama/pvc.yaml Normal file
View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: llama
name: llama-models
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
storageClassName: mayastor-single-ssd

View File

@@ -2,26 +2,26 @@
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: ollama-proxy name: llama-proxy
namespace: ollama namespace: llama
--- ---
apiVersion: secrets.hashicorp.com/v1beta1 apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth kind: VaultAuth
metadata: metadata:
name: ollama name: llama
namespace: ollama namespace: llama
spec: spec:
method: kubernetes method: kubernetes
mount: kubernetes mount: kubernetes
kubernetes: kubernetes:
role: ollama-proxy role: llama-proxy
serviceAccount: ollama-proxy serviceAccount: llama-proxy
--- ---
apiVersion: secrets.hashicorp.com/v1beta1 apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret kind: VaultStaticSecret
metadata: metadata:
name: ollama-api-key name: llama-api-key
namespace: ollama namespace: llama
spec: spec:
type: kv-v2 type: kv-v2
@@ -30,9 +30,9 @@ spec:
destination: destination:
create: true create: true
name: ollama-api-key name: llama-api-key
type: Opaque type: Opaque
transformation: transformation:
excludeRaw: true excludeRaw: true
vaultAuthRef: ollama vaultAuthRef: llama

14
apps/nas/configmap.yaml Normal file
View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nas-sshd-config
namespace: nas
data:
00-chroot.conf: |
Subsystem sftp internal-sftp
Match User nas
ChrootDirectory /config
ForceCommand internal-sftp -d /data
AllowTcpForwarding no
X11Forwarding no
PermitTunnel no

94
apps/nas/deployment.yaml Normal file
View File

@@ -0,0 +1,94 @@
# Single-user SFTP "NAS": user is chrooted into /config with data under
# /config/data (see the nas-sshd-config ConfigMap's Match block).
apiVersion: apps/v1
kind: Deployment
metadata:
name: nas-ssh
namespace: nas
spec:
replicas: 1
selector:
matchLabels:
app: nas-ssh
template:
metadata:
labels:
app: nas-ssh
spec:
securityContext:
fsGroup: 1000
initContainers:
# Runs as root to enforce the ownership/permission layout sshd requires
# for a ChrootDirectory (root-owned, not group/world-writable) before
# the unprivileged server container starts.
- name: prepare-config
image: alpine:3.20.3
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
set -euo pipefail
# Chroot root must be root-owned or sshd refuses the session.
chown root:root /config
chmod 755 /config
# Writable data area for the "nas" user (uid/gid 1000).
mkdir -p /config/data
chown 1000:1000 /config/data
chmod 750 /config/data
# Host keys persist on the PVC; tighten perms on any existing ones.
mkdir -p /config/ssh_host_keys
chown root:root /config/ssh_host_keys
chmod 700 /config/ssh_host_keys
for key in /config/ssh_host_keys/*; do
[ -f "$key" ] || continue
chown root:root "$key"
chmod 600 "$key"
done
# Copy the chroot drop-in from the read-only ConfigMap mount into the
# sshd_config.d directory the image actually reads.
mkdir -p /config/sshd/sshd_config.d
cp /defaults/00-chroot.conf /config/sshd/sshd_config.d/00-chroot.conf
chown root:root /config/sshd/sshd_config.d/00-chroot.conf
chmod 644 /config/sshd/sshd_config.d/00-chroot.conf
volumeMounts:
- name: data
mountPath: /config
- name: sshd-config
mountPath: /defaults/00-chroot.conf
subPath: 00-chroot.conf
readOnly: true
containers:
- name: ssh
image: lscr.io/linuxserver/openssh-server:version-10.0_p1-r9
imagePullPolicy: IfNotPresent
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Etc/UTC
- name: USER_NAME
value: nas
- name: SUDO_ACCESS
value: "false"
# Key-only auth; the public key comes from the Secret below.
- name: PASSWORD_ACCESS
value: "false"
- name: LOG_STDOUT
value: "true"
- name: PUBLIC_KEY
valueFrom:
secretKeyRef:
name: nas-ssh-authorized-keys
key: public_key
ports:
- containerPort: 2222
name: ssh
protocol: TCP
volumeMounts:
- name: data
mountPath: /config
resources:
requests:
cpu: 50m
memory: 128Mi
limits:
memory: 512Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: nas-data
- name: sshd-config
configMap:
name: nas-sshd-config

View File

@@ -2,7 +2,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization kind: Kustomization
resources: resources:
- namespace.yaml - namespace.yaml
- release.yaml
- secret.yaml - secret.yaml
- auth-proxy.yaml - configmap.yaml
- ingress.yaml - pvc.yaml
- deployment.yaml
- service.yaml

4
apps/nas/namespace.yaml Normal file
View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: nas

12
apps/nas/pvc.yaml Normal file
View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nas-data
namespace: nas
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
storageClassName: mayastor-single-hdd

9
apps/nas/secret.yaml Normal file
View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: nas-ssh-authorized-keys
namespace: nas
type: Opaque
stringData:
public_key: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999

15
apps/nas/service.yaml Normal file
View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: nas-ssh
namespace: nas
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster
ports:
- name: ssh
port: 22
targetPort: 2222
protocol: TCP
selector:
app: nas-ssh

View File

@@ -1,60 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: ollama-helm
namespace: ollama
spec:
interval: 24h
url: https://otwld.github.io/ollama-helm/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: ollama
namespace: ollama
spec:
interval: 30m
chart:
spec:
chart: ollama
version: 1.21.0
sourceRef:
kind: HelmRepository
name: ollama-helm
namespace: ollama
interval: 12h
values:
ollama:
gpu:
enabled: false
persistentVolume:
enabled: true
storageClass: mayastor-single-hdd
size: 200Gi
# GPU support
# Rewrite of options in
# https://hub.docker.com/r/grinco/ollama-amd-apu
image:
repository: grinco/ollama-amd-apu
tag: vulkan
securityContext:
# Not ideal
privileged: true
capabilities:
add:
- PERFMON
volumeMounts:
- name: kfd
mountPath: /dev/kfd
- name: dri
mountPath: /dev/dri
volumes:
- name: kfd
hostPath:
path: /dev/kfd
type: CharDevice
- name: dri
hostPath:
path: /dev/dri
type: Directory

View File

@@ -6,9 +6,9 @@ metadata:
name: registry name: registry
annotations: annotations:
cert-manager.io/cluster-issuer: letsencrypt cert-manager.io/cluster-issuer: letsencrypt
nginx.org/client-max-body-size: "0" nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec: spec:
ingressClassName: nginx ingressClassName: nginx-ingress
rules: rules:
- host: registry.lumpiasty.xyz - host: registry.lumpiasty.xyz
http: http:

View File

@@ -5,7 +5,7 @@ metadata:
name: renovate name: renovate
namespace: renovate namespace: renovate
spec: spec:
schedule: "@hourly" schedule: "@daily"
concurrencyPolicy: Forbid concurrencyPolicy: Forbid
jobTemplate: jobTemplate:
spec: spec:
@@ -15,7 +15,7 @@ spec:
- name: renovate - name: renovate
# Update this to the latest available and then enable Renovate on # Update this to the latest available and then enable Renovate on
# the manifest # the manifest
image: renovate/renovate:41.20.2-full image: renovate/renovate:41.169.1-full
envFrom: envFrom:
- secretRef: - secretRef:
name: renovate-gitea-token name: renovate-gitea-token

View File

@@ -0,0 +1 @@
use_default_settings: true

View File

@@ -0,0 +1,42 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng
namespace: searxng
spec:
replicas: 1
selector:
matchLabels:
app: searxng
template:
metadata:
labels:
app: searxng
spec:
containers:
- name: searxng
image: searxng/searxng:2025.8.12-6b1516d
ports:
- containerPort: 8080
env:
- name: SEARXNG_SECRET
valueFrom:
secretKeyRef:
name: searxng-secret
key: SEARXNG_SECRET
optional: false
volumeMounts:
- name: config-volume
mountPath: /etc/searxng/settings.yml
subPath: settings.yml
readOnly: true
- name: searxng-persistent-data
mountPath: /var/cache/searxng
volumes:
- name: config-volume
configMap:
name: searxng-config
- name: searxng-persistent-data
persistentVolumeClaim:
claimName: searxng-persistent-data

25
apps/searxng/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: searxng
name: searxng
annotations:
cert-manager.io/cluster-issuer: letsencrypt
spec:
ingressClassName: nginx-ingress
rules:
- host: searxng.lumpiasty.xyz
http:
paths:
- backend:
service:
name: searxng
port:
number: 8080
path: /
pathType: Prefix
tls:
- hosts:
- searxng.lumpiasty.xyz
secretName: searxng-ingress

View File

@@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
configMapGenerator:
- name: searxng-config
namespace: searxng
files:
- settings.yml=configs/settings.yml

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: searxng

13
apps/searxng/pvc.yaml Normal file
View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: searxng
name: searxng-persistent-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: mayastor-single-ssd

14
apps/searxng/service.yaml Normal file
View File

@@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
name: searxng
namespace: searxng
spec:
selector:
app: searxng
ports:
- protocol: TCP
port: 8080
targetPort: 8080
type: ClusterIP

24
flake.lock generated
View File

@@ -19,11 +19,11 @@
"systems": "systems" "systems": "systems"
}, },
"locked": { "locked": {
"lastModified": 1710146030, "lastModified": 1731533236,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide", "owner": "numtide",
"repo": "flake-utils", "repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -42,11 +42,11 @@
"treefmt-nix": "treefmt-nix" "treefmt-nix": "treefmt-nix"
}, },
"locked": { "locked": {
"lastModified": 1738540903, "lastModified": 1751765453,
"narHash": "sha256-/C5RTu3yCpVFHIL7u3hL9ZRGrXmIrLg3iB4+z9A3E8A=", "narHash": "sha256-tgo3BwFM2UUYQz6dVARztbj5AjKfz4exlPxnKLS/ZRg=",
"owner": "a1994sc", "owner": "a1994sc",
"repo": "krew2nix", "repo": "krew2nix",
"rev": "5bc50d65d6496ad30f897a9fe5532f440fb143ef", "rev": "11f66e65a0146645388eeab68b6212de0b732ed9",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -57,11 +57,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1742069588, "lastModified": 1751637120,
"narHash": "sha256-C7jVfohcGzdZRF6DO+ybyG/sqpo1h6bZi9T56sxLy+k=", "narHash": "sha256-xVNy/XopSfIG9c46nRmPaKfH1Gn/56vQ8++xWA8itO4=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "c80f6a7e10b39afcc1894e02ef785b1ad0b0d7e5", "rev": "5c724ed1388e53cc231ed98330a60eb2f7be4be3",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -116,11 +116,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1715940852, "lastModified": 1750931469,
"narHash": "sha256-wJqHMg/K6X3JGAE9YLM0LsuKrKb4XiBeVaoeMNlReZg=", "narHash": "sha256-0IEdQB1nS+uViQw4k3VGUXntjkDp7aAlqcxdewb/hAc=",
"owner": "numtide", "owner": "numtide",
"repo": "treefmt-nix", "repo": "treefmt-nix",
"rev": "2fba33a182602b9d49f0b2440513e5ee091d838b", "rev": "ac8e6f32e11e9c7f153823abc3ab007f2a65d3e1",
"type": "github" "type": "github"
}, },
"original": { "original": {

View File

@@ -0,0 +1,18 @@
# Mayastor (OpenEBS) StorageClass: single-replica, thin-provisioned NVMe-oF
# volumes placed only on SSD-labelled disk pools.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mayastor-single-ssd
parameters:
protocol: nvmf
# Single replica
repl: "1"
# Thin provision volumes
thin: "true"
# Generate new filesystem's uuid when cloning
cloneFsIdAsVolumeId: "true"
# Schedule volumes from this StorageClass only on pools labelled type=ssd
poolAffinityTopologyLabel: |
type: ssd
provisioner: io.openebs.csi-mayastor
# Allow expansion of volumes
allowVolumeExpansion: true

View File

@@ -18,14 +18,14 @@ spec:
chart: chart:
spec: spec:
chart: cert-manager-webhook-ovh chart: cert-manager-webhook-ovh
version: 0.7.5 version: 0.8.0
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: cert-manager-webhook-ovh name: cert-manager-webhook-ovh
namespace: cert-manager namespace: cert-manager
interval: 12h interval: 12h
values: values:
configVersion: 0.0.1 configVersion: 0.0.2
groupName: lumpiasty-homelab groupName: lumpiasty-homelab
certManager: certManager:
namespace: cert-manager namespace: cert-manager
@@ -38,6 +38,7 @@ spec:
acmeServerUrl: https://acme-v02.api.letsencrypt.org/directory acmeServerUrl: https://acme-v02.api.letsencrypt.org/directory
email: arek.dzski@gmail.com email: arek.dzski@gmail.com
ovhEndpointName: ovh-eu ovhEndpointName: ovh-eu
ovhAuthenticationMethod: application
ovhAuthenticationRef: ovhAuthenticationRef:
applicationKeyRef: applicationKeyRef:
name: ovh-credentials name: ovh-credentials
@@ -45,6 +46,6 @@ spec:
applicationSecretRef: applicationSecretRef:
name: ovh-credentials name: ovh-credentials
key: applicationSecret key: applicationSecret
consumerKeyRef: applicationConsumerKeyRef:
name: ovh-credentials name: ovh-credentials
key: consumerKey key: consumerKey

View File

@@ -23,7 +23,7 @@ spec:
chart: chart:
spec: spec:
chart: cert-manager chart: cert-manager
version: v1.18.1 version: v1.19.1
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: cert-manager name: cert-manager

View File

@@ -23,7 +23,7 @@ spec:
chart: chart:
spec: spec:
chart: cilium chart: cilium
version: 1.17.5 version: 1.18.2
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: cilium name: cilium

View File

@@ -23,7 +23,7 @@ spec:
chart: chart:
spec: spec:
chart: cloudnative-pg chart: cloudnative-pg
version: 0.24.0 version: 0.26.0
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: cnpg name: cnpg

View File

@@ -97,7 +97,7 @@ spec:
env: env:
- name: GOMEMLIMIT - name: GOMEMLIMIT
value: 161MiB value: 161MiB
image: registry.k8s.io/coredns/coredns:v1.12.2 image: registry.k8s.io/coredns/coredns:v1.13.0
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
livenessProbe: livenessProbe:
failureThreshold: 5 failureThreshold: 5

View File

@@ -23,7 +23,7 @@ spec:
chart: chart:
spec: spec:
chart: k8up chart: k8up
version: 4.8.4 version: 4.8.6
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: k8up-io name: k8up-io

View File

@@ -2,32 +2,32 @@
apiVersion: v1 apiVersion: v1
kind: Namespace kind: Namespace
metadata: metadata:
name: nginx-ingress-controller name: nginx-ingress
--- ---
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository kind: HelmRepository
metadata: metadata:
name: nginx name: ingress-nginx
namespace: nginx-ingress-controller namespace: nginx-ingress
spec: spec:
interval: 24h interval: 24h
url: https://helm.nginx.com/stable url: https://kubernetes.github.io/ingress-nginx
--- ---
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
metadata: metadata:
name: nginx-ingress name: nginx-ingress
namespace: nginx-ingress-controller namespace: nginx-ingress
spec: spec:
interval: 30m interval: 30m
chart: chart:
spec: spec:
chart: nginx-ingress chart: ingress-nginx
version: 2.1.0 version: 4.13.3
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: nginx name: ingress-nginx
namespace: nginx-ingress-controller namespace: nginx-ingress
interval: 12h interval: 12h
values: values:
controller: controller:
@@ -39,9 +39,11 @@ spec:
cpu: 100m cpu: 100m
memory: 128Mi memory: 128Mi
ingressClass: ingressClass: "nginx-ingress"
create: true ingressClassResource:
setAsDefaultIngress: true name: "nginx-ingress"
enabled: true
default: false
service: service:
create: true create: true
@@ -49,11 +51,11 @@ spec:
# Requirement for sharing ip with other service # Requirement for sharing ip with other service
externalTrafficPolicy: Cluster externalTrafficPolicy: Cluster
ipFamilyPolicy: RequireDualStack ipFamilyPolicy: RequireDualStack
ipFamilies:
- IPv4
- IPv6
annotations: annotations:
# Share IP with gitea ssh so we can have the same domain for both port # Share IP with gitea ssh so we can have the same domain for both port
lbipam.cilium.io/sharing-key: gitea lbipam.cilium.io/sharing-key: gitea
lbipam.cilium.io/sharing-cross-namespace: gitea lbipam.cilium.io/sharing-cross-namespace: gitea
lbipam.cilium.io/ips: 10.44.0.0,2001:470:61a3:400::1 lbipam.cilium.io/ips: 10.44.0.6,2001:470:61a3:400::6
config:
entries:
proxy-buffering: "false"

View File

@@ -23,7 +23,7 @@ spec:
chart: chart:
spec: spec:
chart: openbao chart: openbao
version: 0.16.1 version: 0.19.0
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: openbao name: openbao

View File

@@ -23,7 +23,7 @@ spec:
chart: chart:
spec: spec:
chart: openebs chart: openebs
version: 4.3.2 version: 4.3.3
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: openebs name: openebs

View File

@@ -23,7 +23,7 @@ spec:
chart: chart:
spec: spec:
chart: vault-secrets-operator chart: vault-secrets-operator
version: 0.10.0 version: 1.0.1
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: hashicorp name: hashicorp

View File

@@ -0,0 +1,11 @@
apiVersion: "openebs.io/v1beta3"
kind: DiskPool
metadata:
name: anapistula-delrosalae-ssd
namespace: openebs
spec:
node: anapistula-delrosalae
disks: ["aio:///dev/disk/by-id/nvme-eui.000000000000000000a07501ead1ebdb"]
topology:
labelled:
type: ssd

View File

@@ -3,7 +3,7 @@ kind: Kustomization
resources: resources:
- controllers/k8up-crd-4.8.3.yaml - controllers/k8up-crd-4.8.3.yaml
- controllers/cilium.yaml - controllers/cilium.yaml
- controllers/nginx.yaml - controllers/nginx-ingress.yaml
- controllers/dns-public.yaml - controllers/dns-public.yaml
- controllers/cert-manager.yaml - controllers/cert-manager.yaml
- controllers/cert-manager-webhook-ovh.yaml - controllers/cert-manager-webhook-ovh.yaml
@@ -15,9 +15,11 @@ resources:
- controllers/mongodb-operator.yaml - controllers/mongodb-operator.yaml
- controllers/cloudnative-pg.yaml - controllers/cloudnative-pg.yaml
- diskpools/anapistula-delrosalae-hdd.yaml - diskpools/anapistula-delrosalae-hdd.yaml
- diskpools/anapistula-delrosalae-ssd.yaml
- configs/bgp-cluster-config.yaml - configs/bgp-cluster-config.yaml
- configs/loadbalancer-ippool.yaml - configs/loadbalancer-ippool.yaml
- configs/single-hdd-sc.yaml - configs/single-hdd-sc.yaml
- configs/single-ssd-sc.yaml
- configs/mayastor-snapshotclass.yaml - configs/mayastor-snapshotclass.yaml
- configs/openbao-cert.yaml - configs/openbao-cert.yaml
- configs/ovh-cert-manager-secret.yaml - configs/ovh-cert-manager-secret.yaml

View File

@@ -10,14 +10,14 @@ machine:
gateway: 2001:470:61a3:100:ffff:ffff:ffff:ffff gateway: 2001:470:61a3:100:ffff:ffff:ffff:ffff
- network: 0.0.0.0/0 - network: 0.0.0.0/0
gateway: 192.168.1.1 gateway: 192.168.1.1
mtu: 1500 mtu: 1280
install: install:
diskSelector: diskSelector:
wwid: t10.ATA SSDPR-CX400-256 GUH039914 wwid: t10.ATA SSDPR-CX400-256 GUH039914
# Generated on https://factory.talos.dev/ # Generated on https://factory.talos.dev/
# intel-ucode and amdgpu # amd-ucode and amdgpu
image: factory.talos.dev/installer/06deebb947b815afa53f04c450d355d3c8bc28927a387c754db1622a0a06349e:v1.9.5 image: factory.talos.dev/metal-installer/9c1d1b442d73f96dcd04e81463eb20000ab014062d22e1b083e1773336bc1dd5:v1.10.6
extraKernelArgs: extraKernelArgs:
- cpufreq.default_governor=performance - cpufreq.default_governor=performance
sysfs: sysfs:
@@ -27,4 +27,3 @@ machine:
devices.system.cpu.cpu6.cpufreq.scaling_max_freq: "550000" devices.system.cpu.cpu6.cpufreq.scaling_max_freq: "550000"
devices.system.cpu.cpu7.cpufreq.scaling_max_freq: "550000" devices.system.cpu.cpu7.cpufreq.scaling_max_freq: "550000"
devices.system.cpu.cpu8.cpufreq.scaling_max_freq: "550000" devices.system.cpu.cpu8.cpufreq.scaling_max_freq: "550000"

11
talos/patches/llama.patch Normal file
View File

@@ -0,0 +1,11 @@
# Exempt the llama namespace from Pod Security admission so its privileged
# pods (raw /dev/kfd and /dev/dri GPU access) are admitted.
cluster:
apiServer:
admissionControl:
- name: PodSecurity
configuration:
# NOTE(review): pod-security.admission.config.k8s.io/v1 has been available
# since Kubernetes 1.25 — consider bumping from v1beta1.
apiVersion: pod-security.admission.config.k8s.io/v1beta1
kind: PodSecurityConfiguration
exemptions:
namespaces:
- llama

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- immich-password
bound_service_account_namespaces:
- immich
token_policies:
- immich

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- llama-proxy
bound_service_account_namespaces:
- llama
token_policies:
- ollama

4
vault/policy/immich.hcl Normal file
View File

@@ -0,0 +1,4 @@
path "secret/data/immich-db" {
capabilities = ["read"]
}