138 Commits

Author SHA1 Message Date
d20647c855 Update renovate/renovate Docker tag to v43.39.2 2026-02-26 00:00:50 +00:00
c7bc79f574 add Qwen3-Coder-Next model 2026-02-26 00:10:53 +01:00
6cba277b9d update llama-swap image 2026-02-25 19:07:10 +01:00
1ca79d5262 disable built in open-webui ingress 2026-02-25 18:20:27 +01:00
95ca2aa54f increase openwebui storage to 10Gi 2026-02-25 17:41:23 +01:00
bfb089aeff migrate llama models to ssd 2026-02-25 16:03:12 +01:00
ed83a66a83 add ssd volume for llama models 2026-02-25 15:43:42 +01:00
0d6c67fc27 add lvmpv ssd storage class 2026-02-25 15:23:55 +01:00
fa7b35326c add openwebui 2026-02-25 15:21:04 +01:00
719a87a6f5 add workaround for cert-manager-webhook-ovh 2026-02-22 20:07:24 +01:00
fe4e546d47 remove configVersion from cert-manager-webhook-ovh 2026-02-22 19:53:39 +01:00
85e83224dc Merge pull request 'Update Helm release cloudnative-pg to v0.27.1' (#130) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #130
2026-02-22 18:50:44 +00:00
889ba49e6a Merge pull request 'Update renovate/renovate Docker tag to v43.31.1' (#131) from renovate/renovate-renovate-43.x into fresh-start
Reviewed-on: #131
2026-02-22 18:50:37 +00:00
0590c97717 Merge pull request 'Update Helm release cilium to v1.19.1' (#132) from renovate/cilium-1.x into fresh-start
Reviewed-on: #132
2026-02-22 18:50:30 +00:00
3c1b2da775 Merge pull request 'Update Helm release openbao to v0.25.5' (#135) from renovate/openbao-0.x into fresh-start
Reviewed-on: #135
2026-02-22 18:49:25 +00:00
2675b2f8eb Merge pull request 'Update Helm release cert-manager-webhook-ovh to v0.9.1' (#138) from renovate/cert-manager-webhook-ovh-0.x into fresh-start
Reviewed-on: #138
2026-02-22 18:49:13 +00:00
72d8d41e16 Merge pull request 'Update Helm release vault-secrets-operator to v1.3.0' (#137) from renovate/vault-secrets-operator-1.x into fresh-start
Reviewed-on: #137
2026-02-22 18:47:40 +00:00
31c809f3dd Merge pull request 'Update Helm release immich to v1.1.0' (#133) from renovate/immich-1.x into fresh-start
Reviewed-on: #133
2026-02-22 18:47:32 +00:00
f0c549a39e change router's ip to ::1 2026-02-22 19:24:05 +01:00
b66b08f151 update talos to 1.12.4 2026-02-22 18:42:55 +01:00
98e3050afa remove mayastor related talos config 2026-02-22 17:04:57 +01:00
fef37fca68 clean up old mayastor config 2026-02-22 16:23:35 +01:00
6fa292629c disable loki and alloy 2026-02-22 16:21:27 +01:00
95b52f3bf3 disable mayastor 2026-02-22 14:29:26 +01:00
d9a646b7fd remove mayastor storageclass, snapshotclass 2026-02-22 14:27:43 +01:00
7ac0029ced switch searxng persistent data to lvm hdd 2026-02-22 14:22:07 +01:00
2c11e4cec0 Update renovate/renovate Docker tag to v43.31.1 2026-02-22 00:00:43 +00:00
40613ce587 Update Helm release cert-manager-webhook-ovh to v0.9.1 2026-02-22 00:00:36 +00:00
b95c9e7c69 switch llama models dir to lvm hdd 2026-02-21 16:51:04 +01:00
05c28d0d46 add lvm hdd llama models pvc 2026-02-21 16:28:06 +01:00
09aeee2bd9 move openbao's data volume to lvm 2026-02-21 16:03:03 +01:00
d709989558 add lvm hdd openbao volume 2026-02-21 15:55:53 +01:00
93d1e579d3 remove docker registry 2026-02-21 15:17:29 +01:00
92bcd0ac34 clean up old library volume, postgres and redis 2026-02-21 15:14:34 +01:00
c1d08326f3 switch immich to new valkey 2026-02-21 15:12:23 +01:00
f9015ad561 add redis authentication 2026-02-21 15:11:29 +01:00
14d79a2cd0 add immich valkey server 2026-02-21 15:03:30 +01:00
da13987ce8 reconfigure immich to use new db 2026-02-21 14:37:54 +01:00
36aab3d935 add new postgres cluster 2026-02-21 14:30:04 +01:00
f50e8c10f7 migrate immich to new library pvc 2026-02-21 14:17:28 +01:00
3b3642faeb add new immich library volume 2026-02-21 13:52:36 +01:00
8a009bc117 add explicit volume for gitea valkey 2026-02-21 13:39:54 +01:00
868f96e390 migrate gitea shared storage to new volume 2026-02-21 13:25:53 +01:00
638fc960af add explicit gitea shared storage volume 2026-02-21 13:20:30 +01:00
1f77bd5176 remove old postgres cluster 2026-02-21 13:07:42 +01:00
ffd350afd0 migrate gitea to lvmhdd backed postgres 2026-02-21 13:05:34 +01:00
093208c3e4 fix fsType on gitea postgres volume 2026-02-21 12:40:20 +01:00
db07a48639 Update Helm release vault-secrets-operator to v1.3.0 2026-02-21 00:00:51 +00:00
f061fd0c81 Update Helm release openbao to v0.25.5 2026-02-21 00:00:45 +00:00
68c54e44fa fix storage class name on gitea postgres vol 2026-02-20 22:49:50 +01:00
3120e9ba60 add btrfs extension 2026-02-20 22:12:46 +01:00
e676b6524d Update Helm release immich to v1.1.0 2026-02-18 00:00:39 +00:00
5748f69dac Update Helm release cilium to v1.19.1 2026-02-18 00:00:36 +00:00
31d0559c57 add browse-pvc krew plugin 2026-02-18 00:04:30 +01:00
4b2d3faf2e use separate kubeconfig 2026-02-18 00:03:37 +01:00
31083e52bc Update Helm release cloudnative-pg to v0.27.1 2026-02-07 00:09:29 +00:00
b1f3337c98 Merge pull request 'Update redis Docker tag to v24.1.3' (#120) from renovate/redis-24.x into fresh-start
Reviewed-on: #120
2026-02-06 00:16:26 +00:00
e610e96d80 Merge pull request 'Update Helm release gitea to v12.5.0' (#122) from renovate/gitea-12.x into fresh-start
Reviewed-on: #122
2026-02-06 00:16:16 +00:00
c9997fb8a7 Merge pull request 'Update Helm release ingress-nginx to v4.14.3' (#123) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #123
2026-02-06 00:16:06 +00:00
b9cc44d7e8 Merge pull request 'Update Helm release openbao to v0.25.0' (#124) from renovate/openbao-0.x into fresh-start
Reviewed-on: #124
2026-02-06 00:15:58 +00:00
be884d07c6 Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.14.1' (#125) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #125
2026-02-06 00:15:28 +00:00
2875d84f33 Merge pull request 'Update alpine Docker tag to v3.23.3' (#126) from renovate/alpine-3.x into fresh-start
Reviewed-on: #126
2026-02-06 00:15:07 +00:00
573601a7ec Merge pull request 'Update Helm release immich to v1.0.12' (#127) from renovate/immich-1.x into fresh-start
Reviewed-on: #127
2026-02-06 00:14:59 +00:00
fb60744c5a Merge pull request 'Update renovate/renovate Docker tag to v43' (#128) from renovate/renovate-renovate-43.x into fresh-start
Reviewed-on: #128
2026-02-06 00:14:51 +00:00
52ca68c4ce Merge pull request 'Update Helm release cert-manager to v1.19.3' (#129) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #129
2026-02-06 00:14:42 +00:00
0934a1130a Update renovate/renovate Docker tag to v43 2026-02-06 00:01:36 +00:00
3d28650c1b add pv for new postgres' gitea cluster 2026-02-06 00:58:44 +01:00
15063c9885 add backup volume snapshot class for gitea postgres 2026-02-06 00:27:45 +01:00
ba3cb2571c Update Helm release openbao to v0.25.0 2026-02-05 00:00:54 +00:00
5edaeb123c Update Helm release immich to v1.0.12 2026-02-05 00:00:43 +00:00
0dc37f69cb Update redis Docker tag to v24.1.3 2026-02-04 00:00:42 +00:00
777239ccb5 Update Helm release ingress-nginx to v4.14.3 2026-02-03 00:00:54 +00:00
352af6f386 Update Helm release cert-manager to v1.19.3 2026-02-03 00:00:50 +00:00
230197e3c6 move frigate deployment to new pvcs 2026-02-01 23:07:20 +01:00
0c5e22f538 add temporary frigate volume to migrate data 2026-02-01 20:11:25 +01:00
e79386b4a5 migrate from raw flake to devenv 2026-02-01 02:00:14 +01:00
8f4932132a Update alpine Docker tag to v3.23.3 2026-01-29 00:00:45 +00:00
bb6272b16e Update registry.k8s.io/coredns/coredns Docker tag to v1.14.1 2026-01-28 00:00:43 +00:00
3a71410c19 enable ts3 after copying files 2026-01-25 01:39:14 +01:00
e5af5c3945 add utility to run temporary pod with pvc mounted 2026-01-25 01:38:32 +01:00
6de56bfd10 add ispeak3 ts3 server 2026-01-25 01:07:35 +01:00
d70a704f89 Update Helm release gitea to v12.5.0 2026-01-24 00:00:54 +00:00
5df94c4656 add pv-migrate to tools 2026-01-19 00:12:44 +01:00
a6772893d0 delete old nas pvc and use new 2026-01-18 19:05:52 +01:00
ba31945337 add secondary nas volume 2026-01-18 18:59:30 +01:00
fcaa28c95a add lvmpv-hdd storage class 2026-01-18 18:53:35 +01:00
a40f9a046a enable openebs lvm-localpv controller 2026-01-18 00:31:52 +01:00
80ed3358e8 Merge pull request 'Update Helm release cilium to v1.18.6' (#116) from renovate/cilium-1.x into fresh-start
Reviewed-on: #116
2026-01-17 22:30:28 +00:00
eae4ff426c Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.13.2' (#118) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #118
2026-01-17 22:30:21 +00:00
0fadd95386 Merge pull request 'Update renovate/renovate Docker tag to v42.84.1' (#119) from renovate/renovate-renovate-42.x into fresh-start
Reviewed-on: #119
2026-01-17 22:30:11 +00:00
29e06c60eb Merge pull request 'Update Helm release immich to v1.0.9' (#117) from renovate/immich-1.x into fresh-start
Reviewed-on: #117
2026-01-17 22:29:59 +00:00
27ae162886 Update renovate/renovate Docker tag to v42.84.1 2026-01-17 22:29:20 +00:00
d96344b310 Update registry.k8s.io/coredns/coredns Docker tag to v1.13.2 2026-01-17 22:29:17 +00:00
e3483fcfe3 Update Helm release immich to v1.0.9 2026-01-17 22:29:14 +00:00
784b335f65 Update Helm release cilium to v1.18.6 2026-01-17 22:29:12 +00:00
9300e327df Merge pull request 'Update alpine Docker tag to v3.23.2' (#104) from renovate/alpine-3.x into fresh-start
Reviewed-on: #104
2026-01-17 22:21:11 +00:00
90fb555dc2 Merge pull request 'Update Helm release openebs to v4.4.0' (#109) from renovate/openebs-4.x into fresh-start
Reviewed-on: #109
2026-01-17 22:20:06 +00:00
78b3b6b400 Merge pull request 'Update redis Docker tag to v24' (#110) from renovate/redis-24.x into fresh-start
Reviewed-on: #110
2026-01-17 22:01:54 +00:00
90897daa27 Merge pull request 'Update Helm release cert-manager to v1.19.2' (#113) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #113
2026-01-17 22:00:51 +00:00
0368252850 Merge pull request 'Update Helm release openbao to v0.23.3' (#111) from renovate/openbao-0.x into fresh-start
Reviewed-on: #111
2026-01-17 22:00:42 +00:00
1503c57fbe Merge pull request 'Update Helm release ingress-nginx to v4.14.1' (#112) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #112
2026-01-17 21:59:35 +00:00
0f12840b35 Merge pull request 'Update Helm release cloudnative-pg to v0.27.0' (#114) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #114
2026-01-17 21:59:24 +00:00
87a071925c Merge pull request 'Update Helm release vault-secrets-operator to v1.2.0' (#115) from renovate/vault-secrets-operator-1.x into fresh-start
Reviewed-on: #115
2026-01-17 21:57:21 +00:00
b6efe42dc2 disable librechat release, it's using bitnami's mongodb 2026-01-17 22:55:28 +01:00
d2cfd7b73d Merge pull request 'Update renovate/renovate Docker tag to v42.84.0' (#107) from renovate/renovate-renovate-42.x into fresh-start
Reviewed-on: #107
2026-01-17 21:35:55 +00:00
bf1cae3fc7 Update renovate/renovate Docker tag to v42.84.0 2026-01-17 21:34:32 +00:00
6712e94237 Update Helm release vault-secrets-operator to v1.2.0 2026-01-13 00:00:37 +00:00
6f8e10f3fc Update Helm release openbao to v0.23.3 2026-01-06 00:00:48 +00:00
3c04fd6b10 Update redis Docker tag to v24 2025-12-19 00:00:48 +00:00
ef353d635a Update alpine Docker tag to v3.23.2 2025-12-19 00:00:36 +00:00
0097d057d5 Update Helm release cloudnative-pg to v0.27.0 2025-12-10 00:00:36 +00:00
b454fc606f Update Helm release cert-manager to v1.19.2 2025-12-10 00:00:32 +00:00
7feb19b7fc update immich 2025-12-07 02:11:41 +01:00
b21f8e402b add abliterated versions of qwen3-vl 2025-12-06 23:33:56 +01:00
68f51b26b0 Update Helm release ingress-nginx to v4.14.1 2025-12-06 00:00:32 +00:00
1095d7ef4d Update Helm release openebs to v4.4.0 2025-11-22 00:00:29 +00:00
8d83c6dc83 increase free space limit on frigate to 24h and enable two-way sync 2025-11-17 01:43:17 +01:00
65e75a4d39 Add 8B and 2B variants of qwen3-vl 2025-11-15 22:21:10 +01:00
6c7457d095 fix Qwen3-VL-4B-Instruct-GGUF models looping issue 2025-11-15 20:40:27 +01:00
9b556e98a9 add qwen3-vl thinking variant 2025-11-15 19:31:53 +01:00
202ebc7b86 add qwen3-vl, fix librechat taking over settings and clean up llama config 2025-11-15 19:18:43 +01:00
ec61023f74 fix cache location after llama-swap update 2025-11-15 18:05:12 +01:00
05d3493bb7 update llama-swap 2025-11-15 17:57:46 +01:00
2a9f8c3092 Merge pull request 'Update Helm release cilium to v1.18.4' (#99) from renovate/cilium-1.x into fresh-start
Reviewed-on: #99
2025-11-15 16:49:56 +00:00
226ee59fa6 Merge pull request 'Update Helm release cloudnative-pg to v0.26.1' (#100) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #100
2025-11-15 16:49:50 +00:00
c8f34c45ac Merge pull request 'Update Helm release openbao to v0.19.2' (#101) from renovate/openbao-0.x into fresh-start
Reviewed-on: #101
2025-11-15 16:49:41 +00:00
c0fa400159 Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.13.1' (#102) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #102
2025-11-15 16:49:31 +00:00
6ccb00e86e Merge pull request 'Update Helm release immich to v1.0.6' (#103) from renovate/immich-1.x into fresh-start
Reviewed-on: #103
2025-11-15 16:49:17 +00:00
7b8fb8d8bb Merge pull request 'Update Helm release ingress-nginx to v4.14.0' (#105) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #105
2025-11-15 16:48:24 +00:00
0ae3181267 Merge pull request 'Update renovate/renovate Docker tag to v42' (#106) from renovate/renovate-renovate-42.x into fresh-start
Reviewed-on: #106
2025-11-15 16:47:38 +00:00
c0d83249b9 Update renovate/renovate Docker tag to v42 2025-11-15 00:00:31 +00:00
974d70a39e Update Helm release cilium to v1.18.4 2025-11-13 00:00:23 +00:00
4518fc674a Update Helm release openbao to v0.19.2 2025-11-07 00:00:23 +00:00
c3912af26b Update Helm release immich to v1.0.6 2025-11-06 00:00:36 +00:00
797b97496e Update Helm release ingress-nginx to v4.14.0 2025-11-04 00:00:49 +00:00
29457af188 add nas deployment 2025-11-03 02:31:02 +01:00
2a8e56824e Update registry.k8s.io/coredns/coredns Docker tag to v1.13.1 2025-10-28 00:00:30 +00:00
f71794de4d Update Helm release cloudnative-pg to v0.26.1 2025-10-24 00:00:24 +00:00
79 changed files with 1890 additions and 764 deletions

.envrc (new file)
View File

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
export DIRENV_WARN_TIMEOUT=20s
eval "$(devenv direnvrc)"
# `use devenv` supports the same options as the `devenv shell` command.
#
# To silence all output, use `--quiet`.
#
# Example usage: use devenv --quiet --impure --option services.postgres.enable:bool true
use devenv
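With this .envrc in place, direnv activates the devenv shell automatically on cd. A minimal first-time sketch, assuming direnv and devenv are already installed and hooked into the shell:

  direnv allow .    # trust this .envrc once
  devenv shell      # or enter the environment manually, without direnv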

.gitignore (vendored)
View File

@@ -1,2 +1,12 @@
secrets.yaml
talos/generated
talos/generated
# Devenv
.devenv*
devenv.local.nix
devenv.local.yaml
# direnv
.direnv
# pre-commit
.pre-commit-config.yaml

View File

@@ -1,7 +1,7 @@
{
"recommendations": [
"arrterian.nix-env-selector",
"jnoortheen.nix-ide",
"detachhead.basedpyright"
"detachhead.basedpyright",
"mkhl.direnv"
]
}

View File

@@ -1,13 +1,4 @@
{
"nixEnvSelector.nixFile": "${workspaceFolder}/shell.nix",
"terminal.integrated.profiles.linux": {
"Nix Shell": {
"path": "nix",
"args": ["develop"],
"icon": "terminal-linux"
}
},
"terminal.integrated.defaultProfile.linux": "Nix Shell",
"ansible.python.interpreterPath": "/bin/python",
"python.defaultInterpreterPath": "${env:PYTHON_BIN}"
}

View File

@@ -20,3 +20,6 @@ gen-talos-config:
apply-talos-config:
talosctl -n anapistula-delrosalae apply-config -f talos/generated/anapistula-delrosalae.yaml
get-kubeconfig:
talosctl -n anapistula-delrosalae kubeconfig talos/generated/kubeconfig
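A usage sketch for the new recipe, assuming just is on the PATH and the Talos config has already been generated:

  just get-kubeconfig
  kubectl --kubeconfig talos/generated/kubeconfig get nodes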

View File

@@ -0,0 +1,49 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-config
namespace: openebs
spec:
capacity: 5Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-config
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-config
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-config
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
volumeName: frigate-config
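This LVMVolume + PersistentVolume + PersistentVolumeClaim trio is the static-provisioning pattern repeated for every volume below: the LVMVolume pins a logical volume on the node, the PV binds to it through the local.csi.openebs.io driver via volumeHandle, and the PVC claims the PV by volumeName. A verification sketch (resource names as above; the singular lvmvolume form is assumed to resolve to the lvmvolumes.local.openebs.io CRD):

  kubectl get lvmvolume -n openebs frigate-config
  kubectl get pv frigate-config
  kubectl get pvc -n frigate frigate-config   # should report STATUS Bound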

View File

@@ -3,5 +3,7 @@ kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- config-pvc.yaml
- media-pvc.yaml
- release.yaml
- webrtc-svc.yaml

View File

@@ -0,0 +1,49 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-media
namespace: openebs
spec:
capacity: 500Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-media
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-media
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-media
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-media
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
volumeName: frigate-media

View File

@@ -36,6 +36,8 @@ spec:
cookie_secure: True
record:
expire_interval: 1440 # 24h
sync_recordings: True
enabled: True
retain:
days: 90

View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-shared-storage-lvmhdd
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-shared-storage-lvmhdd
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: gitea-shared-storage-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-shared-storage-lvmhdd
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: hdd-lvmpv
volumeName: gitea-shared-storage-lvmhdd

View File

@@ -2,7 +2,10 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- gitea-shared-volume.yaml
- valkey-volume.yaml
- release.yaml
- secret.yaml
- backups.yaml

View File

@@ -2,11 +2,27 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: gitea-postgresql-cluster
name: gitea-postgresql-cluster-lvmhdd
namespace: gitea
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:17.4
storage:
size: 10Gi
storageClass: mayastor-single-hdd
pvcTemplate:
storageClassName: hdd-lvmpv
resources:
requests:
storage: 20Gi
volumeName: gitea-postgresql-cluster-lvmhdd-1
# Just to avoid bootstrapping the instance again
# I migrated data manually using pv-migrate because this feature is broken
# when source and target volumes are in different storage classes
# CNPG just sets dataSource to the PVC and expects the underlying storage
# to handle the migration, but it doesn't work here
bootstrap:
recovery:
backup:
name: backup-migration
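A sketch of the manual copy described in the comment above, assuming pv-migrate's migrate subcommand and CNPG's usual <cluster>-<n> PVC naming for the source (the old PVC name is an assumption, not taken from this diff):

  pv-migrate migrate \
    --source-namespace gitea \
    --dest-namespace gitea \
    gitea-postgresql-cluster-1 \
    gitea-postgresql-cluster-lvmhdd-1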

View File

@@ -0,0 +1,33 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-postgresql-cluster-lvmhdd-1
namespace: openebs
spec:
capacity: 20Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-postgresql-cluster-lvmhdd-1
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: gitea-postgresql-cluster-lvmhdd-1
---
# PVCs are dynamically created by the Postgres operator

View File

@@ -17,7 +17,7 @@ spec:
chart:
spec:
chart: gitea
version: 12.4.0
version: 12.5.0
sourceRef:
kind: HelmRepository
name: gitea-charts
@@ -45,26 +45,28 @@ spec:
primary:
persistence:
enabled: true
storageClass: mayastor-single-hdd
existingClaim: gitea-valkey-primary-lvmhdd-0
resources:
requests:
cpu: 0
persistence:
enabled: true
storageClass: mayastor-single-hdd
# We'll create PV and PVC manually
create: false
claimName: gitea-shared-storage-lvmhdd
gitea:
additionalConfigFromEnvs:
- name: GITEA__DATABASE__PASSWD
valueFrom:
secretKeyRef:
name: gitea-postgresql-cluster-app
name: gitea-postgresql-cluster-lvmhdd-app
key: password
config:
database:
DB_TYPE: postgres
HOST: gitea-postgresql-cluster-rw:5432
HOST: gitea-postgresql-cluster-lvmhdd-rw:5432
NAME: app
USER: app
indexer:

View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-valkey-primary-lvmhdd-0
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-valkey-primary-lvmhdd-0
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: gitea-valkey-primary-lvmhdd-0
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-valkey-primary-lvmhdd-0
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: hdd-lvmpv
volumeName: gitea-valkey-primary-lvmhdd-0

View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: immich-library-lvmhdd
namespace: openebs
spec:
capacity: 150Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: immich-library-lvmhdd
spec:
capacity:
storage: 150Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: immich-library-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: library-lvmhdd
namespace: immich
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 150Gi
storageClassName: hdd-lvmpv
volumeName: immich-library-lvmhdd

View File

@@ -2,8 +2,10 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- volume.yaml
- valkey-volume.yaml
- redis.yaml
- postgres-password.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- immich-library.yaml
- release.yaml

View File

@@ -2,21 +2,31 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: immich-db
name: immich-db-lvmhdd
namespace: immich
spec:
# TODO: Configure renovate to handle imageName
imageName: ghcr.io/tensorchord/cloudnative-vectorchord:14-0.4.3
instances: 1
storage:
size: 10Gi
storageClass: mayastor-single-hdd
pvcTemplate:
storageClassName: hdd-lvmpv
resources:
requests:
storage: 10Gi
volumeName: immich-db-lvmhdd-1
# Just to avoid bootstrapping the instance again
# I migrated data manually using pv-migrate because this feature is broken
# when source and target volumes are in different storage classes
# CNPG just sets dataSource to the PVC and expects the underlying storage
# to handle the migration, but it doesn't work here
bootstrap:
initdb:
# Defaults of immich chart
database: immich
owner: immich
recovery:
backup:
name: backup-migration
# We need to create a custom role because the default one does not allow
# setting up the vectorchord extension
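The backup-migration Backup object referenced in the bootstrap stanza is not part of this diff; a plausible shape, assuming it targets the old cluster and that a backup destination is already configured on it:

  apiVersion: postgresql.cnpg.io/v1
  kind: Backup
  metadata:
    name: backup-migration
    namespace: immich
  spec:
    cluster:
      name: immich-db   # old cluster name is an assumption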

View File

@@ -0,0 +1,33 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: immich-db-lvmhdd-1
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: immich-db-lvmhdd-1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: immich-db-lvmhdd-1
---
# PVCs are dynamically created by the Postgres operator

View File

@@ -2,28 +2,35 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: bitnami
name: valkey
namespace: immich
spec:
interval: 24h
type: "oci"
url: oci://registry-1.docker.io/bitnamicharts/
url: https://valkey.io/valkey-helm/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: redis
name: valkey
namespace: immich
spec:
interval: 30m
chart:
spec:
chart: redis
version: 23.1.3
chart: valkey
version: 0.9.3
sourceRef:
kind: HelmRepository
name: bitnami
name: valkey
values:
global:
defaultStorageClass: mayastor-single-hdd
architecture: standalone
dataStorage:
enabled: true
persistentVolumeClaimName: immich-valkey
auth:
enabled: true
usersExistingSecret: redis
aclUsers:
default:
passwordKey: redis-password
permissions: "~* &* +@all"

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: immich
version: 1.0.2
version: 1.1.0
sourceRef:
kind: HelmRepository
name: secustor
@@ -27,14 +27,14 @@ spec:
config:
vecotrExtension: vectorchord
postgres:
host: immich-db-rw
host: immich-db-lvmhdd-rw
existingSecret:
enabled: true
secretName: immich-db-immich
usernameKey: username
passwordKey: password
redis:
host: redis-master
host: valkey
existingSecret:
enabled: true
secretName: redis
@@ -47,7 +47,7 @@ spec:
volumes:
- name: uploads
persistentVolumeClaim:
claimName: library
claimName: library-lvmhdd
machineLearning:
enabled: true

View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: immich-valkey
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: immich-valkey
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: immich-valkey
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: immich-valkey
namespace: immich
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: hdd-lvmpv
volumeName: immich-valkey

View File

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: library
namespace: immich
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 150Gi
storageClassName: mayastor-single-hdd

View File

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- statefulset.yaml
- service.yaml

View File

@@ -1,5 +1,4 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: registry
name: ispeak3

apps/ispeak3/pvc.yaml (new file)
View File

@@ -0,0 +1,49 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: ispeak3-ts3-data
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: ispeak3-ts3-data
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: ispeak3-ts3-data
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: ispeak3
name: ispeak3-ts3-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ispeak3-ts3-data
namespace: ispeak3
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
volumeName: ispeak3-ts3-data

apps/ispeak3/service.yaml (new file)
View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: teamspeak3
namespace: ispeak3
spec:
selector:
app: teamspeak3
ports:
- name: voice
protocol: UDP
port: 9987
targetPort: 9987
- name: filetransfer
protocol: TCP
port: 30033
targetPort: 30033
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilyPolicy: PreferDualStack

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: teamspeak3-server
namespace: ispeak3
spec:
serviceName: "teamspeak3"
replicas: 1
selector:
matchLabels:
app: teamspeak3
template:
metadata:
labels:
app: teamspeak3
spec:
containers:
- name: teamspeak3
image: teamspeak:3.13.7
ports:
- containerPort: 9987
name: voice
protocol: UDP
- containerPort: 10011
name: query
- containerPort: 30033
name: filetransfer
volumeMounts:
- name: ts3-data
mountPath: /var/ts3server/
volumes:
- name: ts3-data
persistentVolumeClaim:
claimName: ispeak3-ts3-data
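On first start the teamspeak image prints the ServerQuery admin credentials and the privilege key to stdout; a retrieval sketch, with the pod name derived from the StatefulSet naming:

  kubectl logs -n ispeak3 teamspeak3-server-0 | grep -Ei 'token|loginname|password'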

View File

@@ -2,7 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- gitea
- registry
- renovate
- librechat
- frigate
@@ -10,3 +9,5 @@ resources:
- immich
- nas
- searxng
- ispeak3
- openwebui

View File

@@ -8,92 +8,113 @@ spec:
interval: 24h
url: https://dynomite567.github.io/helm-charts/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: librechat
namespace: librechat
spec:
interval: 30m
chart:
spec:
chart: librechat
version: 1.9.1
sourceRef:
kind: HelmRepository
name: dynomite567-charts
values:
global:
librechat:
existingSecretName: librechat
librechat:
configEnv:
PLUGIN_MODELS: null
ALLOW_REGISTRATION: "false"
TRUST_PROXY: "1"
DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
SEARCH: "true"
existingSecretName: librechat
configYamlContent: |
version: 1.0.3
# apiVersion: helm.toolkit.fluxcd.io/v2
# kind: HelmRelease
# metadata:
# name: librechat
# namespace: librechat
# spec:
# interval: 30m
# chart:
# spec:
# chart: librechat
# version: 1.9.1
# sourceRef:
# kind: HelmRepository
# name: dynomite567-charts
# values:
# global:
# librechat:
# existingSecretName: librechat
# librechat:
# configEnv:
# PLUGIN_MODELS: null
# ALLOW_REGISTRATION: "false"
# TRUST_PROXY: "1"
# DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
# SEARCH: "true"
# existingSecretName: librechat
# configYamlContent: |
# version: 1.0.3
endpoints:
custom:
- name: "Llama.cpp"
apiKey: "llama"
baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
models:
default: [
"DeepSeek-R1-0528-Qwen3-8B-GGUF",
"Qwen3-8B-GGUF",
"Qwen3-8B-GGUF-no-thinking",
"gemma3n-e4b",
"gemma3-12b",
"gemma3-12b-q2",
"gemma3-12b-novision",
"gemma3-4b",
"gemma3-4b-novision",
"Qwen3-4B-Thinking-2507",
"Qwen3-4B-Thinking-2507-long-ctx",
"Qwen2.5-VL-7B-Instruct-GGUF",
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L"
]
titleConvo: true
titleModel: "gemma3-4b-novision"
summarize: false
summaryModel: "gemma3-4b-novision"
forcePrompt: false
modelDisplayLabel: "Llama.cpp"
imageVolume:
enabled: true
size: 10G
accessModes: ReadWriteOnce
storageClassName: mayastor-single-hdd
ingress:
enabled: true
className: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
hosts:
- host: librechat.lumpiasty.xyz
paths:
- path: /
pathType: ImplementationSpecific
tls:
- hosts:
- librechat.lumpiasty.xyz
secretName: librechat-ingress
# endpoints:
# custom:
# - name: "Llama.cpp"
# apiKey: "llama"
# baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
# models:
# default: [
# "DeepSeek-R1-0528-Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF-no-thinking",
# "gemma3n-e4b",
# "gemma3-12b",
# "gemma3-12b-q2",
# "gemma3-12b-novision",
# "gemma3-4b",
# "gemma3-4b-novision",
# "Qwen3-4B-Thinking-2507",
# "Qwen3-4B-Thinking-2507-long-ctx",
# "Qwen2.5-VL-7B-Instruct-GGUF",
# "Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
# "Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L",
# "Qwen3-VL-2B-Instruct-GGUF",
# "Qwen3-VL-2B-Instruct-GGUF-unslothish",
# "Qwen3-VL-2B-Thinking-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF-unslothish",
# "Qwen3-VL-4B-Thinking-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF-unslothish",
# "Qwen3-VL-8B-Thinking-GGUF",
# "Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF",
# "Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF"
# ]
# titleConvo: true
# titleModel: "gemma3-4b-novision"
# summarize: false
# summaryModel: "gemma3-4b-novision"
# forcePrompt: false
# modelDisplayLabel: "Llama.cpp"
mongodb:
persistence:
storageClass: mayastor-single-hdd
# # ✨ IMPORTANT: let llama-swap/llama-server own all these
# dropParams:
# - "temperature"
# - "top_p"
# - "top_k"
# - "presence_penalty"
# - "frequency_penalty"
# - "stop"
# - "max_tokens"
# imageVolume:
# enabled: true
# size: 10G
# accessModes: ReadWriteOnce
# storageClassName: mayastor-single-hdd
# ingress:
# enabled: true
# className: nginx-ingress
# annotations:
# cert-manager.io/cluster-issuer: letsencrypt
# nginx.ingress.kubernetes.io/proxy-body-size: "0"
# nginx.ingress.kubernetes.io/proxy-buffering: "false"
# nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
# hosts:
# - host: librechat.lumpiasty.xyz
# paths:
# - path: /
# pathType: ImplementationSpecific
# tls:
# - hosts:
# - librechat.lumpiasty.xyz
# secretName: librechat-ingress
meilisearch:
persistence:
storageClass: mayastor-single-hdd
auth:
existingMasterKeySecret: librechat
# mongodb:
# persistence:
# storageClass: mayastor-single-hdd
# meilisearch:
# persistence:
# storageClass: mayastor-single-hdd
# auth:
# existingMasterKeySecret: librechat

View File

@@ -5,212 +5,479 @@ models:
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF-no-thinking":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--jinja --chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--jinja
--chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
"gemma3n-e4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"gemma3-12b-q2":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 262144
--predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn auto
--cache-type-k q8_0
--cache-type-v q8_0
--port ${PORT}
"Qwen3-4B-Instruct-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Instruct-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 262144
--predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn auto
--cache-type-k q8_0
--cache-type-v q8_0
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-7B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
-ngl 37 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Thinking-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Thinking-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Thinking-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf noctrex/Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF:Q6_K
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf noctrex/Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF:Q6_K
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-Coder-Next-GGUF:Q4_K_M":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-Coder-Next-GGUF:Q4_K_M
--ctx-size 32768
--predict 8192
--temp 1.0
--min-p 0.01
--top-p 0.95
--top-k 40
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
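llama-swap exposes an OpenAI-compatible endpoint and loads whichever model a request names, so the new entry can be exercised with a plain chat completion. A sketch assuming in-cluster reachability of the service URL used elsewhere in this config (the bearer value matches the apiKey the clients pass):

  curl http://llama.llama.svc.cluster.local:11434/v1/chat/completions \
    -H 'Content-Type: application/json' \
    -H 'Authorization: Bearer llama' \
    -d '{"model": "Qwen3-Coder-Next-GGUF:Q4_K_M", "messages": [{"role": "user", "content": "hello"}]}'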

View File

@@ -16,7 +16,8 @@ spec:
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v166-vulkan-b6795
# TODO: make renovate update the image tag
image: ghcr.io/mostlygeek/llama-swap:v195-vulkan-b8148
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap
@@ -29,7 +30,7 @@ spec:
protocol: TCP
volumeMounts:
- name: models
mountPath: /root/.cache
mountPath: /app/.cache
- mountPath: /dev/kfd
name: kfd
- mountPath: /dev/dri
@@ -41,7 +42,7 @@ spec:
volumes:
- name: models
persistentVolumeClaim:
claimName: llama-models
claimName: llama-models-lvmssd
- name: kfd
hostPath:
path: /dev/kfd

View File

@@ -5,7 +5,7 @@ resources:
- secret.yaml
- auth-proxy.yaml
- ingress.yaml
- pvc.yaml
- pvc-ssd.yaml
- deployment.yaml
configMapGenerator:
- name: llama-swap

apps/llama/pvc-ssd.yaml (new file)
View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: llama-models-lvmssd
namespace: openebs
spec:
capacity: 200Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-ssd$
volGroup: openebs-ssd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: llama-models-lvmssd
spec:
capacity:
storage: 200Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: ssd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: llama-models-lvmssd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: llama-models-lvmssd
namespace: llama
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
storageClassName: ssd-lvmpv
volumeName: llama-models-lvmssd

View File

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: llama
name: llama-models
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
storageClassName: mayastor-single-ssd

View File

@@ -1,14 +1,28 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nas-sshd-config
name: nas-sftp-config
namespace: nas
data:
00-chroot.conf: |
Subsystem sftp internal-sftp
Match User nas
ChrootDirectory /config
ForceCommand internal-sftp -d /data
AllowTcpForwarding no
X11Forwarding no
PermitTunnel no
sftp.json: |
{
"Global": {
"Chroot": {
"Directory": "%h",
"StartPath": "data"
},
"Directories": [
"data"
]
},
"Users": [
{
"Username": "nas",
"UID": 1000,
"GID": 1000,
"PublicKeys": [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999"
]
}
]
}
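With Chroot.Directory set to %h and StartPath to data, an SFTP session lands directly inside the data directory. A usage sketch, assuming the external IP assigned to the nas-sftp LoadBalancer service:

  sftp nas@<loadbalancer-ip>    # authenticates with the public key above
  sftp> pwd                     # /data, relative to the chroot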

View File

@@ -1,84 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nas-ssh
name: nas-sftp
namespace: nas
spec:
replicas: 1
selector:
matchLabels:
app: nas-ssh
app: nas-sftp
template:
metadata:
labels:
app: nas-ssh
app: nas-sftp
spec:
securityContext:
fsGroup: 1000
initContainers:
- name: prepare-config
image: alpine:3.20.3
- name: prepare-home
image: alpine:3.23.3
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
set -euo pipefail
chown root:root /config
chmod 755 /config
mkdir -p /config/data
chown 1000:1000 /config/data
chmod 750 /config/data
mkdir -p /config/ssh_host_keys
chown root:root /config/ssh_host_keys
chmod 700 /config/ssh_host_keys
for key in /config/ssh_host_keys/*; do
[ -f "$key" ] || continue
chown root:root "$key"
chmod 600 "$key"
done
mkdir -p /config/sshd/sshd_config.d
cp /defaults/00-chroot.conf /config/sshd/sshd_config.d/00-chroot.conf
chown root:root /config/sshd/sshd_config.d/00-chroot.conf
chmod 644 /config/sshd/sshd_config.d/00-chroot.conf
mkdir -p /volume/sftp-root
chown root:root /volume/sftp-root
chmod 755 /volume/sftp-root
mkdir -p /volume/sftp-root/data
chown 1000:1000 /volume/sftp-root/data
chmod 750 /volume/sftp-root/data
mkdir -p /volume/host-keys
chown root:root /volume/host-keys
chmod 700 /volume/host-keys
volumeMounts:
- name: data
mountPath: /config
- name: sshd-config
mountPath: /defaults/00-chroot.conf
subPath: 00-chroot.conf
readOnly: true
- name: home
mountPath: /volume
containers:
- name: ssh
image: lscr.io/linuxserver/openssh-server:version-10.0_p1-r9
- name: sftp
image: docker.io/emberstack/sftp:build-5.1.72
imagePullPolicy: IfNotPresent
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Etc/UTC
- name: USER_NAME
value: nas
- name: SUDO_ACCESS
value: "false"
- name: PASSWORD_ACCESS
value: "false"
- name: LOG_STDOUT
value: "true"
- name: PUBLIC_KEY
valueFrom:
secretKeyRef:
name: nas-ssh-authorized-keys
key: public_key
ports:
- containerPort: 2222
name: ssh
- containerPort: 22
name: sftp
protocol: TCP
volumeMounts:
- name: data
mountPath: /config
- name: config
mountPath: /app/config/sftp.json
subPath: sftp.json
readOnly: true
- name: home
mountPath: /home/nas
subPath: sftp-root
- name: home
mountPath: /etc/ssh/keys
subPath: host-keys
resources:
requests:
cpu: 50m
@@ -86,9 +60,9 @@ spec:
limits:
memory: 512Mi
volumes:
- name: data
- name: home
persistentVolumeClaim:
claimName: nas-data
- name: sshd-config
claimName: nas-data-lvm-hdd
- name: config
configMap:
name: nas-sshd-config
name: nas-sftp-config

View File

@@ -2,7 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- configmap.yaml
- pvc.yaml
- deployment.yaml

View File

@@ -1,12 +1,49 @@
apiVersion: v1
kind: PersistentVolumeClaim
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
name: nas-data
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: nas-data-lvm-hdd
namespace: openebs
spec:
capacity: 4Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
spec:
capacity:
storage: 4Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: nas-data-lvm-hdd
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: nas
name: nas-data-lvm-hdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
namespace: nas
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
storageClassName: mayastor-single-hdd
requests:
storage: 4Gi
volumeName: nas-data-lvm-hdd

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: nas-ssh-authorized-keys
namespace: nas
type: Opaque
stringData:
public_key: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999

View File

@@ -1,15 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: nas-ssh
name: nas-sftp
namespace: nas
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster
ports:
- name: ssh
- name: sftp
port: 22
targetPort: 2222
targetPort: 22
protocol: TCP
selector:
app: nas-ssh
app: nas-sftp

View File

@@ -0,0 +1,44 @@
---
apiVersion: v1
kind: Service
metadata:
namespace: openwebui
name: openwebui-web
spec:
type: ClusterIP
selector:
app.kubernetes.io/component: open-webui
app.kubernetes.io/instance: openwebui
ports:
- name: http
port: 80
targetPort: 8080
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: openwebui
name: openwebui
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
spec:
ingressClassName: nginx-ingress
rules:
- host: openwebui.lumpiasty.xyz
http:
paths:
- backend:
service:
name: openwebui-web
port:
number: 80
path: /
pathType: Prefix
tls:
- hosts:
- openwebui.lumpiasty.xyz
secretName: openwebui-ingress

View File

@@ -1,8 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- volume.yaml
- deployment.yaml
- pvc.yaml
- pvc-pipelines.yaml
- release.yaml
- ingress.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: openwebui

View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: openwebui-pipelines-lvmhdd
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: openwebui-pipelines-lvmhdd
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: openwebui-pipelines-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: openwebui-pipelines-lvmhdd
namespace: openwebui
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: hdd-lvmpv
volumeName: openwebui-pipelines-lvmhdd

apps/openwebui/pvc.yaml (new file)
View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: openwebui-lvmhdd
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: openwebui-lvmhdd
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: openwebui-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: openwebui-lvmhdd
namespace: openwebui
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: hdd-lvmpv
volumeName: openwebui-lvmhdd

View File

@@ -0,0 +1,46 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: open-webui
namespace: openwebui
spec:
interval: 24h
url: https://open-webui.github.io/helm-charts
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: openwebui
namespace: openwebui
spec:
interval: 30m
chart:
spec:
chart: open-webui
version: 12.5.0
sourceRef:
kind: HelmRepository
name: open-webui
values:
# Disable the built-in ingress: the chart's service is broken
# (it hard-codes the wrong target port).
# Reimplemented in ingress.yaml
ingress:
enabled: false
persistence:
enabled: true
existingClaim: openwebui-lvmhdd
enableOpenaiApi: true
openaiBaseApiUrl: "http://llama.llama.svc.cluster.local:11434/v1"
ollama:
enabled: false
pipelines:
enabled: true
persistence:
enabled: true
existingClaim: openwebui-pipelines-lvmhdd
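After editing the HelmRelease, reconciliation can be forced rather than waiting out the 30m interval; a sketch assuming the flux CLI is installed:

  flux reconcile helmrelease openwebui -n openwebui --with-source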

View File

@@ -1,40 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
        - name: registry
          image: registry:3.0.0
          ports:
            - containerPort: 5000
          volumeMounts:
            - name: data
              mountPath: /var/lib/registry
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: registry-data
---
apiVersion: v1
kind: Service
metadata:
  name: registry-service
  namespace: registry
spec:
  selector:
    app: registry
  ports:
    - protocol: TCP
      port: 80
      targetPort: 5000

View File

@@ -1,26 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  namespace: registry
  name: registry
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
  ingressClassName: nginx-ingress
  rules:
    - host: registry.lumpiasty.xyz
      http:
        paths:
          - backend:
              service:
                name: registry-service
                port:
                  number: 80
            path: /
            pathType: Prefix
  tls:
    - hosts:
        - registry.lumpiasty.xyz
      secretName: researcher-ingress

View File

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-data
  namespace: registry
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: mayastor-single-hdd

View File

@@ -15,7 +15,7 @@ spec:
          - name: renovate
            # Update this to the latest available and then enable Renovate on
            # the manifest
            image: renovate/renovate:41.169.1-full
            image: renovate/renovate:43.39.2-full
            envFrom:
              - secretRef:
                  name: renovate-gitea-token

View File

@@ -39,4 +39,4 @@ spec:
            name: searxng-config
        - name: searxng-persistent-data
          persistentVolumeClaim:
            claimName: searxng-persistent-data
            claimName: searxng-persistent-data-lvmhdd

View File

@@ -1,13 +1,46 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
  labels:
    kubernetes.io/nodename: anapistula-delrosalae
  name: searxng-persistent-data-lvmhdd
  namespace: openebs
spec:
  capacity: 1Gi
  ownerNodeID: anapistula-delrosalae
  shared: "yes"
  thinProvision: "no"
  vgPattern: ^openebs-hdd$
  volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: searxng-persistent-data-lvmhdd
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: hdd-lvmpv
  volumeMode: Filesystem
  csi:
    driver: local.csi.openebs.io
    fsType: btrfs
    volumeHandle: searxng-persistent-data-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: searxng-persistent-data-lvmhdd
  namespace: searxng
  name: searxng-persistent-data
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: mayastor-single-ssd
  storageClassName: hdd-lvmpv
  volumeName: searxng-persistent-data-lvmhdd
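
The old mayastor-backed claim's contents still have to be copied onto the new LVM-backed one. One option is pv-migrate, already present in the dev shell; a hedged sketch (subcommand and flag names per its upstream README, not verified here):

pv-migrate migrate \
  --source-namespace searxng \
  --dest-namespace searxng \
  searxng-persistent-data searxng-persistent-data-lvmhdd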

View File

@@ -1,17 +1,34 @@
{
  "nodes": {
    "flake-compat": {
    "devenv": {
      "locked": {
        "lastModified": 1733328505,
        "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
        "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
        "revCount": 69,
        "type": "tarball",
        "url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.1.0/01948eb7-9cba-704f-bbf3-3fa956735b52/source.tar.gz"
        "dir": "src/modules",
        "lastModified": 1769881431,
        "owner": "cachix",
        "repo": "devenv",
        "rev": "72d5e66e2dd5112766ef4c9565872b51094b542d",
        "type": "github"
      },
      "original": {
        "type": "tarball",
        "url": "https://flakehub.com/f/edolstra/flake-compat/1.1.0.tar.gz"
        "dir": "src/modules",
        "owner": "cachix",
        "repo": "devenv",
        "type": "github"
      }
    },
    "flake-compat": {
      "flake": false,
      "locked": {
        "lastModified": 1767039857,
        "owner": "NixOS",
        "repo": "flake-compat",
        "rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "repo": "flake-compat",
        "type": "github"
      }
    },
    "flake-utils": {
@@ -20,7 +37,6 @@
      },
      "locked": {
        "lastModified": 1731533236,
        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
@@ -32,6 +48,47 @@
        "type": "github"
      }
    },
    "git-hooks": {
      "inputs": {
        "flake-compat": "flake-compat",
        "gitignore": "gitignore",
        "nixpkgs": [
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1769069492,
        "owner": "cachix",
        "repo": "git-hooks.nix",
        "rev": "a1ef738813b15cf8ec759bdff5761b027e3e1d23",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
        "repo": "git-hooks.nix",
        "type": "github"
      }
    },
    "gitignore": {
      "inputs": {
        "nixpkgs": [
          "git-hooks",
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1762808025,
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "rev": "cb5e3fdca1de58ccbc3ef53de65bd372b48f567c",
        "type": "github"
      },
      "original": {
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "type": "github"
      }
    },
    "krew2nix": {
      "inputs": {
        "flake-utils": "flake-utils",
@@ -42,11 +99,10 @@
        "treefmt-nix": "treefmt-nix"
      },
      "locked": {
        "lastModified": 1751765453,
        "narHash": "sha256-tgo3BwFM2UUYQz6dVARztbj5AjKfz4exlPxnKLS/ZRg=",
        "lastModified": 1769904483,
        "owner": "a1994sc",
        "repo": "krew2nix",
        "rev": "11f66e65a0146645388eeab68b6212de0b732ed9",
        "rev": "17d6ad3375899bd3f7d4d298481536155f3ec13c",
        "type": "github"
      },
      "original": {
@@ -57,11 +113,10 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1751637120,
        "narHash": "sha256-xVNy/XopSfIG9c46nRmPaKfH1Gn/56vQ8++xWA8itO4=",
        "lastModified": 1769461804,
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "5c724ed1388e53cc231ed98330a60eb2f7be4be3",
        "rev": "bfc1b8a4574108ceef22f02bafcf6611380c100d",
        "type": "github"
      },
      "original": {
@@ -73,15 +128,18 @@
    },
    "root": {
      "inputs": {
        "flake-compat": "flake-compat",
        "devenv": "devenv",
        "git-hooks": "git-hooks",
        "krew2nix": "krew2nix",
        "nixpkgs": "nixpkgs"
        "nixpkgs": "nixpkgs",
        "pre-commit-hooks": [
          "git-hooks"
        ]
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
@@ -96,7 +154,6 @@
    "systems_2": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
@@ -116,11 +173,10 @@
        ]
      },
      "locked": {
        "lastModified": 1750931469,
        "narHash": "sha256-0IEdQB1nS+uViQw4k3VGUXntjkDp7aAlqcxdewb/hAc=",
        "lastModified": 1769691507,
        "owner": "numtide",
        "repo": "treefmt-nix",
        "rev": "ac8e6f32e11e9c7f153823abc3ab007f2a65d3e1",
        "rev": "28b19c5844cc6e2257801d43f2772a4b4c050a1b",
        "type": "github"
      },
      "original": {

devenv.nix Normal file
View File

@@ -0,0 +1,61 @@
{ pkgs, lib, config, inputs, ... }:
let
  # Python with hvac package
  python = pkgs.python313.withPackages (python-pkgs: with python-pkgs; [
    hvac
  ]);
in
{
  # Overlays - apply krew2nix to get kubectl with krew support
  overlays = [
    inputs.krew2nix.overlay
  ];

  # Environment variables
  env = {
    GREET = "devenv";
    TALOSCONFIG = "${config.devenv.root}/talos/generated/talosconfig";
    EDITOR = "vim";
    RESTIC_REPOSITORY = "s3:https://s3.eu-central-003.backblazeb2.com/lumpiasty-backups";
    VAULT_ADDR = "https://openbao.lumpiasty.xyz:8200";
    PATH = "${config.devenv.root}/utils:${pkgs.coreutils}/bin";
    PYTHON_BIN = "${python}/bin/python";
    KUBECONFIG = "${config.devenv.root}/talos/generated/kubeconfig";
  };

  # Packages
  packages = with pkgs; [
    python
    vim gnumake
    talosctl cilium-cli
    kubectx k9s kubernetes-helm
    (kubectl.withKrewPlugins (plugins: with plugins; [
      mayastor
      openebs
      browse-pvc
    ]))
    ansible
    fluxcd
    restic
    openbao
    pv-migrate
  ];

  # Scripts
  scripts.hello.exec = ''
    echo hello from $GREET
  '';

  # Shell hooks
  enterShell = ''
    source ${pkgs.bash-completion}/share/bash-completion/bash_completion
    echo "Environment ready!"
  '';

  # Tests
  enterTest = ''
    echo "Running tests"
    git --version | grep --color=auto "${pkgs.git.version}"
  '';
}
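
With the flake replaced by devenv, entering the environment becomes a single command (assuming the devenv CLI is installed alongside Nix):

devenv shell   # enter the environment defined by devenv.nix
hello          # inside the shell: the script defined above
devenv test    # run the enterTest block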

devenv.yaml Normal file
View File

@@ -0,0 +1,20 @@
# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json
inputs:
  nixpkgs:
    url: github:NixOS/nixpkgs/nixos-unstable
  krew2nix:
    url: github:a1994sc/krew2nix
    inputs:
      nixpkgs:
        follows: nixpkgs

# If you're using non-OSS software, you can set allowUnfree to true.
# allowUnfree: true

# If you're willing to use a package that's vulnerable
# permittedInsecurePackages:
#   - "openssl-1.1.1w"

# If you have more than one devenv you can merge them
#imports:
#  - ./backend

View File

@@ -1,62 +0,0 @@
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    # Only to ease updating flake.lock, flake-compat is used by shell.nix
    flake-compat.url = https://flakehub.com/f/edolstra/flake-compat/1.1.0.tar.gz;
    # Allows us to install krew plugins
    krew2nix.url = "github:a1994sc/krew2nix";
    krew2nix.inputs.nixpkgs.follows = "nixpkgs";
  };

  outputs = { self, nixpkgs, krew2nix, ... }: let
    system = "x86_64-linux";
  in {
    devShells."${system}".default =
      let
        pkgs = import nixpkgs {
          overlays = [ krew2nix.overlay ];
          inherit system;
        };
        python = (pkgs.python313.withPackages (python-pkgs: with python-pkgs; [
          hvac
        ]));
      in
      pkgs.mkShell {
        packages = with pkgs; [
          python
          vim gnumake
          talosctl cilium-cli
          kubectx k9s kubernetes-helm
          (kubectl.withKrewPlugins (plugins: with plugins; [
            mayastor
            openebs
          ]))
          ansible
          fluxcd
          restic
          openbao
        ];

        shellHook = ''
          # Get completions working
          source ${pkgs.bash-completion}/share/bash-completion/bash_completion
          export TALOSCONFIG=$(pwd)/talos/generated/talosconfig
          export EDITOR=vim
          export RESTIC_REPOSITORY=s3:https://s3.eu-central-003.backblazeb2.com/lumpiasty-backups
          # export AWS_ACCESS_KEY_ID=?
          # export AWS_SECRET_ACCESS_KEY=?
          # export RESTIC_PASSWORD=?
          export VAULT_ADDR=https://openbao.lumpiasty.xyz:8200
          # Add scripts from utils subdir
          export PATH="$PATH:$(pwd)/utils"
          export PYTHON_BIN=${python}/bin/python
        '';
      };
  };
}

View File

@@ -9,7 +9,7 @@ spec:
  peers:
    - name: barracuda
      peerASN: 65000
      peerAddress: 2001:470:61a3:100:ffff:ffff:ffff:ffff
      peerAddress: 2001:470:61a3:100::1
      peerConfigRef:
        name: cilium-peer
---

View File

@@ -0,0 +1,12 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: hdd-lvmpv
parameters:
  storage: "lvm"
  volgroup: "openebs-hdd"
  fsType: "btrfs"
  shared: "yes"
provisioner: local.csi.openebs.io
allowVolumeExpansion: true
volumeBindingMode: Immediate
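
The manifests in this repo pin volumes to pre-created LVs via static PV/PVC pairs, but nothing stops the class from provisioning dynamically; a minimal sketch with a made-up claim name:

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: scratch-hdd        # hypothetical claim, not part of this repo
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: hdd-lvmpv
EOF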

View File

@@ -0,0 +1,13 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ssd-lvmpv
parameters:
  storage: "lvm"
  volgroup: "openebs-ssd"
  fsType: "btrfs"
  shared: "yes"
provisioner: local.csi.openebs.io
allowVolumeExpansion: true
volumeBindingMode: Immediate

View File

@@ -1,8 +0,0 @@
kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1
metadata:
  name: csi-mayastor-snapshotclass
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"
driver: io.openebs.csi-mayastor
deletionPolicy: Delete

View File

@@ -0,0 +1,46 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
  labels:
    kubernetes.io/nodename: anapistula-delrosalae
  name: openbao-volume-lvmhdd
  namespace: openebs
spec:
  capacity: 1Gi
  ownerNodeID: anapistula-delrosalae
  shared: "yes"
  thinProvision: "no"
  vgPattern: ^openebs-hdd$
  volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: openbao-volume-lvmhdd
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: hdd-lvmpv
  volumeMode: Filesystem
  csi:
    driver: local.csi.openebs.io
    fsType: btrfs
    volumeHandle: openbao-volume-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: openbao-volume-lvmhdd
  namespace: openbao
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: hdd-lvmpv
  volumeName: openbao-volume-lvmhdd
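
Once the claim is Bound, the utils/kubectl-run-with-pvc.sh script added further down can mount it for a quick inspection (the LVMVolume is created with shared: "yes", so a second pod on the node may attach):

utils/kubectl-run-with-pvc.sh -n openbao openbao-volume-lvmhdd
# inside the pod:
ls -la /pvcs/openbao-volume-lvmhdd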

View File

@@ -1,20 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: mayastor-single-hdd
parameters:
  protocol: nvmf
  # Single replica
  repl: "1"
  # Thin provision volumes
  thin: "true"
  # Allow expansion of volumes
  allowVolumeExpansion: "true"
  # Generate a new filesystem uuid when cloning
  cloneFsIdAsVolumeId: "true"
  # Schedule this SC only on hdd pools
  poolAffinityTopologyLabel: |
    type: hdd
provisioner: io.openebs.csi-mayastor
# Allow expansion of volumes
allowVolumeExpansion: true

View File

@@ -1,18 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: mayastor-single-ssd
parameters:
  protocol: nvmf
  # Single replica
  repl: "1"
  # Thin provision volumes
  thin: "true"
  # Generate a new filesystem uuid when cloning
  cloneFsIdAsVolumeId: "true"
  # Schedule this SC only on ssd pools
  poolAffinityTopologyLabel: |
    type: ssd
provisioner: io.openebs.csi-mayastor
# Allow expansion of volumes
allowVolumeExpansion: true

View File

@@ -18,14 +18,13 @@ spec:
  chart:
    spec:
      chart: cert-manager-webhook-ovh
      version: 0.8.0
      version: 0.9.1
      sourceRef:
        kind: HelmRepository
        name: cert-manager-webhook-ovh
        namespace: cert-manager
      interval: 12h
  values:
    configVersion: 0.0.2
    groupName: lumpiasty-homelab
    certManager:
      namespace: cert-manager
@@ -49,3 +48,11 @@ spec:
        applicationConsumerKeyRef:
          name: ovh-credentials
          key: consumerKey
    # Workaround for a bug in the chart:
    # "nil pointer evaluating interface {}.enabled"
    externalAccountBinding:
      enabled: false
      keyID: ""
      keySecretRef:
        name: ""
        key: ""

View File

@@ -23,7 +23,7 @@ spec:
  chart:
    spec:
      chart: cert-manager
      version: v1.19.1
      version: v1.19.3
      sourceRef:
        kind: HelmRepository
        name: cert-manager

View File

@@ -23,7 +23,7 @@ spec:
  chart:
    spec:
      chart: cilium
      version: 1.18.2
      version: 1.19.1
      sourceRef:
        kind: HelmRepository
        name: cilium

View File

@@ -23,7 +23,7 @@ spec:
  chart:
    spec:
      chart: cloudnative-pg
      version: 0.26.0
      version: 0.27.1
      sourceRef:
        kind: HelmRepository
        name: cnpg

View File

@@ -97,7 +97,7 @@ spec:
        env:
          - name: GOMEMLIMIT
            value: 161MiB
        image: registry.k8s.io/coredns/coredns:v1.13.0
        image: registry.k8s.io/coredns/coredns:v1.14.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 5

View File

@@ -23,7 +23,7 @@ spec:
  chart:
    spec:
      chart: ingress-nginx
      version: 4.13.3
      version: 4.14.3
      sourceRef:
        kind: HelmRepository
        name: ingress-nginx

View File

@@ -23,7 +23,7 @@ spec:
  chart:
    spec:
      chart: openbao
      version: 0.19.0
      version: 0.25.5
      sourceRef:
        kind: HelmRepository
        name: openbao
@@ -60,21 +60,27 @@ spec:
          service_registration "kubernetes" {}
      replicas: 1
      # Mount TLS cert to container
      # Disable the chart's dataStorage setting and add the data volume manually
      dataStorage:
        enabled: false
      volumes:
        # Mount TLS cert to container
        - name: tls
          secret:
            secretName: openbao-lumpiasty-xyz
        - name: data
          persistentVolumeClaim:
            claimName: openbao-volume-lvmhdd
      volumeMounts:
        - name: tls
          mountPath: /tls
          readOnly: true
        - name: data
          mountPath: /openbao/data
      service:
        enabled: true
        type: LoadBalancer
        ipFamilyPolicy: RequireDualStack
      dataStorage:
        storageClass: mayastor-single-hdd
    csi:
      enabled: true
    injector:

View File

@@ -23,7 +23,7 @@ spec:
  chart:
    spec:
      chart: openebs
      version: 4.3.3
      version: 4.4.0
      sourceRef:
        kind: HelmRepository
        name: openebs
@@ -38,95 +38,20 @@ spec:
    lvm-localpv:
      crds:
        lvmLocalPv:
          enabled: false
    mayastor:
      csi:
        node:
          initContainers:
            # We need to disable the init container that checks for the nvme_tcp module, since Talos has that module built-in.
            # https://www.talos.dev/v1.9/kubernetes-guides/configuration/storage/#deploy-mayastor
            enabled: false
          resources:
            requests:
              cpu: 0
        controller:
          resources:
            requests:
              cpu: 0
      etcd:
        clusterDomain: homelab.lumpiasty.xyz
        # Single node cluster for now
        replicaCount: 1
      io_engine:
        # Workaround for crashing io-engine
        # https://github.com/openebs/mayastor/issues/1763#issuecomment-2481922234
        envcontext: "iova-mode=pa"
        coreList: [1, 7]
        resources:
          limits:
            cpu: 4
      agents:
        core:
          resources:
            requests:
              cpu: 0
        ha:
          node:
            resources:
              requests:
                cpu: 0
          cluster:
            resources:
              requests:
                cpu: 0
      apis:
        rest:
          resources:
            requests:
              cpu: 0
      obs:
        callhome:
          resources:
            requests:
              cpu: 0
        stats:
          resources:
            requests:
              cpu: 0
      operators:
        pool:
          resources:
            requests:
              cpu: 0
      # Remove anti-affinity; it breaks when set to 1 replica
      nats:
        cluster:
          enable: true
          replicas: 3
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution: []
      enabled: true
    loki:
      loki:
        commonConfig:
          replication_factor: 1
      singleBinary:
        replicas: 1
      minio:
        replicas: 1
        mode: standalone
      enabled: false
    alloy:
      enabled: false
    engines:
      local:
        lvm:
          enabled: false
          enabled: true
        zfs:
          enabled: false
      replicated:
        mayastor:
          enabled: true
          enabled: false

View File

@@ -23,7 +23,7 @@ spec:
  chart:
    spec:
      chart: vault-secrets-operator
      version: 1.0.1
      version: 1.3.0
      sourceRef:
        kind: HelmRepository
        name: hashicorp

View File

@@ -9,7 +9,6 @@ resources:
- controllers/cert-manager-webhook-ovh.yaml
- controllers/openebs.yaml
- controllers/k8up.yaml
- controllers/openbao.yaml
- controllers/external-secrets.yaml
- controllers/vault-secrets-operator.yaml
- controllers/mongodb-operator.yaml
@@ -18,8 +17,10 @@ resources:
- diskpools/anapistula-delrosalae-ssd.yaml
- configs/bgp-cluster-config.yaml
- configs/loadbalancer-ippool.yaml
- configs/single-hdd-sc.yaml
- configs/single-ssd-sc.yaml
- configs/mayastor-snapshotclass.yaml
- configs/lvmpv-hdd-sc.yaml
- configs/lvmpv-ssd-sc.yaml
- configs/openbao-cert.yaml
- configs/ovh-cert-manager-secret.yaml
- configs/openbao-volume.yaml
- controllers/openbao.yaml

View File

@@ -1,15 +0,0 @@
# Needed for Nix Environment Selector
# https://github.com/edolstra/flake-compat/
(import
  (
    let
      lock = builtins.fromJSON (builtins.readFile ./flake.lock);
      nodeName = lock.nodes.root.inputs.flake-compat;
    in
    fetchTarball {
      url = lock.nodes.${nodeName}.locked.url;
      sha256 = lock.nodes.${nodeName}.locked.narHash;
    }
  )
  { src = ./.; }
).shellNix

View File

@@ -7,7 +7,7 @@ machine:
        - 192.168.1.35/24
        routes:
          - network: ::/0
            gateway: 2001:470:61a3:100:ffff:ffff:ffff:ffff
            gateway: 2001:470:61a3:100::1
          - network: 0.0.0.0/0
            gateway: 192.168.1.1
        mtu: 1280
@@ -16,14 +16,5 @@ machine:
    diskSelector:
      wwid: t10.ATA SSDPR-CX400-256 GUH039914
    # Generated on https://factory.talos.dev/
    # amd-ucode and amdgpu
    image: factory.talos.dev/metal-installer/9c1d1b442d73f96dcd04e81463eb20000ab014062d22e1b083e1773336bc1dd5:v1.10.6
    extraKernelArgs:
      - cpufreq.default_governor=performance
  sysfs:
    devices.system.cpu.cpu0.cpufreq.scaling_max_freq: "550000"
    devices.system.cpu.cpu1.cpufreq.scaling_max_freq: "550000"
    devices.system.cpu.cpu2.cpufreq.scaling_max_freq: "550000"
    devices.system.cpu.cpu6.cpufreq.scaling_max_freq: "550000"
    devices.system.cpu.cpu7.cpufreq.scaling_max_freq: "550000"
    devices.system.cpu.cpu8.cpufreq.scaling_max_freq: "550000"
    # amd-ucode, amdgpu and btrfs
    image: factory.talos.dev/metal-installer/80c3a00af9a5930d1788532c6cc9e8a9b23f8e553d1bb2933b2221f92703d655:v1.12.4
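
A factory image bump like this one is applied as a Talos upgrade rather than a plain config apply; a sketch, with the node address taken from the network config above:

talosctl upgrade \
  --nodes 192.168.1.35 \
  --image factory.talos.dev/metal-installer/80c3a00af9a5930d1788532c6cc9e8a9b23f8e553d1bb2933b2221f92703d655:v1.12.4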

View File

@@ -4,7 +4,7 @@ machine:
  network:
    nameservers:
      - 2001:470:61a3:100:ffff:ffff:ffff:ffff
      - 2001:470:61a3:100::1
      - 192.168.1.1
    searchDomains:

View File

@@ -1,22 +1,5 @@
# Mayastor requirements
# OpenEBS requirements
# https://www.talos.dev/v1.9/kubernetes-guides/configuration/storage/#openebs-mayastor-replicated-storage
machine:
  sysctls:
    vm.nr_hugepages: "2048"
  nodeLabels:
    openebs.io/engine: "mayastor"
  kubelet:
    extraMounts:
      - destination: /var/local
        type: bind
        source: /var/local
        options:
          - bind
          - rshared
          - rw
  install:
    extraKernelArgs:
      - isolcpus=1,7
cluster:
  apiServer:
utils/kubectl-run-with-pvc.sh Executable file
View File

@@ -0,0 +1,146 @@
#!/usr/bin/env bash
# A utility script to run a kubectl pod with one or more PVCs mounted.
# Original: https://gist.github.com/yuanying/3aa7d59dcce65470804ab43def646ab6
# Modified to add help message, -n and -x options, and other improvements.
IMAGE="gcr.io/google-containers/ubuntu-slim:0.14"
COMMAND="/bin/bash"
NAMESPACE=""
CONTEXT=""
SUFFIX=$(date +%s | shasum | base64 | fold -w 10 | head -1 | tr '[:upper:]' '[:lower:]')
usage_exit() {
  cat <<EOF
kubectl-run-with-pvc - Run a temporary pod with PersistentVolumeClaims mounted

USAGE:
    $0 [-c command] [-i image] [-n namespace] [-x context] [-h] PVC [PVC ...]

DESCRIPTION:
    Creates an ephemeral Kubernetes pod that mounts one or more PersistentVolumeClaims (PVCs).
    Each PVC is mounted at /pvcs/<claimName>. The pod is automatically removed when you exit.
    Useful for inspecting, debugging, or manipulating data in PVCs without having to deploy
    a persistent pod or job.

OPTIONS:
    -i IMAGE
        Container image to use in the pod.
        Default: gcr.io/google-containers/ubuntu-slim:0.14
    -c COMMAND
        Command to execute in the container.
        Default: /bin/bash
    -n NAMESPACE
        Kubernetes namespace where the pod will be created.
        Default: current namespace (from kubectl config)
    -x CONTEXT
        kubectl context to use for this operation.
        Default: current context (from kubectl config)
    -h
        Display this help message and exit.

EXAMPLES:
    # Mount a single PVC and get an interactive shell
    $0 my-pvc

    # Mount multiple PVCs
    $0 data-pvc logs-pvc config-pvc

    # Use a specific namespace
    $0 -n my-namespace my-pvc

    # Use a different context and namespace
    $0 -x prod-cluster -n production my-pvc

    # Use Alpine Linux instead of Ubuntu
    $0 -i alpine:latest -c sh my-pvc

    # Run a command non-interactively
    $0 -c "ls -lh /pvcs/my-pvc" my-pvc

MOUNT PATHS:
    Each PVC is mounted to: /pvcs/<claimName>
    Example: If you mount 'database-pvc', it will be at /pvcs/database-pvc

NOTES:
    - Pod name is auto-generated: pvc-mounter-<random-suffix>
    - Pod is removed when you exit (--rm flag)
    - Uses hostNetwork: true for networking access
    - Requires kubectl configured and permissions to create pods

PREREQUISITES:
    - kubectl installed and configured
    - PVCs must already exist in the target namespace
    - User must have permission to create pods in the target namespace
EOF
  exit 1
}
while getopts i:c:n:x:h OPT
do
  case $OPT in
    i) IMAGE=$OPTARG
       ;;
    c) COMMAND=$OPTARG
       ;;
    n) NAMESPACE=$OPTARG
       ;;
    x) CONTEXT=$OPTARG
       ;;
    h) usage_exit
       ;;
    \?) usage_exit
       ;;
  esac
done
shift $((OPTIND - 1))
# Require at least one PVC
if [ $# -eq 0 ]; then
  echo "Error: At least one PVC name is required" 1>&2
  usage_exit
fi

VOL_MOUNTS=""
VOLS=""
COMMA=""
# Quote "$@" so each PVC name is handled as a single argument
for i in "$@"
do
  VOL_MOUNTS="${VOL_MOUNTS}${COMMA}{\"name\": \"${i}\",\"mountPath\": \"/pvcs/${i}\"}"
  VOLS="${VOLS}${COMMA}{\"name\": \"${i}\",\"persistentVolumeClaim\": {\"claimName\": \"${i}\"}}"
  COMMA=","
done
KUBECTL_CMD="kubectl"
[ -n "$CONTEXT" ] && KUBECTL_CMD="$KUBECTL_CMD --context=$CONTEXT"
[ -n "$NAMESPACE" ] && KUBECTL_CMD="$KUBECTL_CMD --namespace=$NAMESPACE"
$KUBECTL_CMD run -it --rm --restart=Never --image=${IMAGE} pvc-mounter-${SUFFIX} --overrides "
{
  \"spec\": {
    \"containers\": [
      {
        \"args\": [\"${COMMAND}\"],
        \"stdin\": true,
        \"tty\": true,
        \"name\": \"pvc\",
        \"image\": \"${IMAGE}\",
        \"volumeMounts\": [
          ${VOL_MOUNTS}
        ]
      }
    ],
    \"volumes\": [
      ${VOLS}
    ]
  }
}
" -- ${COMMAND}