83 Commits

Author SHA1 Message Date
51433006a5 Update renovate/renovate Docker tag to v43.8.1 2026-02-11 00:00:57 +00:00
b1f3337c98 Merge pull request 'Update redis Docker tag to v24.1.3' (#120) from renovate/redis-24.x into fresh-start
Reviewed-on: #120
2026-02-06 00:16:26 +00:00
e610e96d80 Merge pull request 'Update Helm release gitea to v12.5.0' (#122) from renovate/gitea-12.x into fresh-start
Reviewed-on: #122
2026-02-06 00:16:16 +00:00
c9997fb8a7 Merge pull request 'Update Helm release ingress-nginx to v4.14.3' (#123) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #123
2026-02-06 00:16:06 +00:00
b9cc44d7e8 Merge pull request 'Update Helm release openbao to v0.25.0' (#124) from renovate/openbao-0.x into fresh-start
Reviewed-on: #124
2026-02-06 00:15:58 +00:00
be884d07c6 Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.14.1' (#125) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #125
2026-02-06 00:15:28 +00:00
2875d84f33 Merge pull request 'Update alpine Docker tag to v3.23.3' (#126) from renovate/alpine-3.x into fresh-start
Reviewed-on: #126
2026-02-06 00:15:07 +00:00
573601a7ec Merge pull request 'Update Helm release immich to v1.0.12' (#127) from renovate/immich-1.x into fresh-start
Reviewed-on: #127
2026-02-06 00:14:59 +00:00
fb60744c5a Merge pull request 'Update renovate/renovate Docker tag to v43' (#128) from renovate/renovate-renovate-43.x into fresh-start
Reviewed-on: #128
2026-02-06 00:14:51 +00:00
52ca68c4ce Merge pull request 'Update Helm release cert-manager to v1.19.3' (#129) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #129
2026-02-06 00:14:42 +00:00
0934a1130a Update renovate/renovate Docker tag to v43 2026-02-06 00:01:36 +00:00
3d28650c1b add pv for new postgres' gitea cluster 2026-02-06 00:58:44 +01:00
15063c9885 add backup volume snapshot class for girea postgress 2026-02-06 00:27:45 +01:00
ba3cb2571c Update Helm release openbao to v0.25.0 2026-02-05 00:00:54 +00:00
5edaeb123c Update Helm release immich to v1.0.12 2026-02-05 00:00:43 +00:00
0dc37f69cb Update redis Docker tag to v24.1.3 2026-02-04 00:00:42 +00:00
777239ccb5 Update Helm release ingress-nginx to v4.14.3 2026-02-03 00:00:54 +00:00
352af6f386 Update Helm release cert-manager to v1.19.3 2026-02-03 00:00:50 +00:00
230197e3c6 move frigate deployment to new pvcs 2026-02-01 23:07:20 +01:00
0c5e22f538 add temporary frigate volume to migrate data 2026-02-01 20:11:25 +01:00
e79386b4a5 migrate from raw flake to devenv 2026-02-01 02:00:14 +01:00
8f4932132a Update alpine Docker tag to v3.23.3 2026-01-29 00:00:45 +00:00
bb6272b16e Update registry.k8s.io/coredns/coredns Docker tag to v1.14.1 2026-01-28 00:00:43 +00:00
3a71410c19 enable ts3 after copying files 2026-01-25 01:39:14 +01:00
e5af5c3945 add utility to run temporary pod with pvc mounted 2026-01-25 01:38:32 +01:00
6de56bfd10 add ispeak3 ts3 server 2026-01-25 01:07:35 +01:00
d70a704f89 Update Helm release gitea to v12.5.0 2026-01-24 00:00:54 +00:00
5df94c4656 add pv-migrate to tools 2026-01-19 00:12:44 +01:00
a6772893d0 delete old nas pvc and use new 2026-01-18 19:05:52 +01:00
ba31945337 add secondary nas volume 2026-01-18 18:59:30 +01:00
fcaa28c95a add lvmpv-hdd storage class 2026-01-18 18:53:35 +01:00
a40f9a046a enable openebs lvm-localpv controller 2026-01-18 00:31:52 +01:00
80ed3358e8 Merge pull request 'Update Helm release cilium to v1.18.6' (#116) from renovate/cilium-1.x into fresh-start
Reviewed-on: #116
2026-01-17 22:30:28 +00:00
eae4ff426c Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.13.2' (#118) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #118
2026-01-17 22:30:21 +00:00
0fadd95386 Merge pull request 'Update renovate/renovate Docker tag to v42.84.1' (#119) from renovate/renovate-renovate-42.x into fresh-start
Reviewed-on: #119
2026-01-17 22:30:11 +00:00
29e06c60eb Merge pull request 'Update Helm release immich to v1.0.9' (#117) from renovate/immich-1.x into fresh-start
Reviewed-on: #117
2026-01-17 22:29:59 +00:00
27ae162886 Update renovate/renovate Docker tag to v42.84.1 2026-01-17 22:29:20 +00:00
d96344b310 Update registry.k8s.io/coredns/coredns Docker tag to v1.13.2 2026-01-17 22:29:17 +00:00
e3483fcfe3 Update Helm release immich to v1.0.9 2026-01-17 22:29:14 +00:00
784b335f65 Update Helm release cilium to v1.18.6 2026-01-17 22:29:12 +00:00
9300e327df Merge pull request 'Update alpine Docker tag to v3.23.2' (#104) from renovate/alpine-3.x into fresh-start
Reviewed-on: #104
2026-01-17 22:21:11 +00:00
90fb555dc2 Merge pull request 'Update Helm release openebs to v4.4.0' (#109) from renovate/openebs-4.x into fresh-start
Reviewed-on: #109
2026-01-17 22:20:06 +00:00
78b3b6b400 Merge pull request 'Update redis Docker tag to v24' (#110) from renovate/redis-24.x into fresh-start
Reviewed-on: #110
2026-01-17 22:01:54 +00:00
90897daa27 Merge pull request 'Update Helm release cert-manager to v1.19.2' (#113) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #113
2026-01-17 22:00:51 +00:00
0368252850 Merge pull request 'Update Helm release openbao to v0.23.3' (#111) from renovate/openbao-0.x into fresh-start
Reviewed-on: #111
2026-01-17 22:00:42 +00:00
1503c57fbe Merge pull request 'Update Helm release ingress-nginx to v4.14.1' (#112) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #112
2026-01-17 21:59:35 +00:00
0f12840b35 Merge pull request 'Update Helm release cloudnative-pg to v0.27.0' (#114) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #114
2026-01-17 21:59:24 +00:00
87a071925c Merge pull request 'Update Helm release vault-secrets-operator to v1.2.0' (#115) from renovate/vault-secrets-operator-1.x into fresh-start
Reviewed-on: #115
2026-01-17 21:57:21 +00:00
b6efe42dc2 disable librechat release, it's using bitnami's mongodb 2026-01-17 22:55:28 +01:00
d2cfd7b73d Merge pull request 'Update renovate/renovate Docker tag to v42.84.0' (#107) from renovate/renovate-renovate-42.x into fresh-start
Reviewed-on: #107
2026-01-17 21:35:55 +00:00
bf1cae3fc7 Update renovate/renovate Docker tag to v42.84.0 2026-01-17 21:34:32 +00:00
6712e94237 Update Helm release vault-secrets-operator to v1.2.0 2026-01-13 00:00:37 +00:00
6f8e10f3fc Update Helm release openbao to v0.23.3 2026-01-06 00:00:48 +00:00
3c04fd6b10 Update redis Docker tag to v24 2025-12-19 00:00:48 +00:00
ef353d635a Update alpine Docker tag to v3.23.2 2025-12-19 00:00:36 +00:00
0097d057d5 Update Helm release cloudnative-pg to v0.27.0 2025-12-10 00:00:36 +00:00
b454fc606f Update Helm release cert-manager to v1.19.2 2025-12-10 00:00:32 +00:00
7feb19b7fc update immich 2025-12-07 02:11:41 +01:00
b21f8e402b add abliterated versions of qwen3-vl 2025-12-06 23:33:56 +01:00
68f51b26b0 Update Helm release ingress-nginx to v4.14.1 2025-12-06 00:00:32 +00:00
1095d7ef4d Update Helm release openebs to v4.4.0 2025-11-22 00:00:29 +00:00
8d83c6dc83 increase free space limit on frigate to 24h and enable two-way sync 2025-11-17 01:43:17 +01:00
65e75a4d39 Add 8B and 2B variants of qwen3-vl 2025-11-15 22:21:10 +01:00
6c7457d095 fix Qwen3-VL-4B-Instruct-GGUF models looping issue 2025-11-15 20:40:27 +01:00
9b556e98a9 add qwen3-vl thinking variant 2025-11-15 19:31:53 +01:00
202ebc7b86 add qwen3-vl, fix librechat taking over settings and clean up llama config 2025-11-15 19:18:43 +01:00
ec61023f74 fix cache location after llama-swap update 2025-11-15 18:05:12 +01:00
05d3493bb7 update llama-swap 2025-11-15 17:57:46 +01:00
2a9f8c3092 Merge pull request 'Update Helm release cilium to v1.18.4' (#99) from renovate/cilium-1.x into fresh-start
Reviewed-on: #99
2025-11-15 16:49:56 +00:00
226ee59fa6 Merge pull request 'Update Helm release cloudnative-pg to v0.26.1' (#100) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #100
2025-11-15 16:49:50 +00:00
c8f34c45ac Merge pull request 'Update Helm release openbao to v0.19.2' (#101) from renovate/openbao-0.x into fresh-start
Reviewed-on: #101
2025-11-15 16:49:41 +00:00
c0fa400159 Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.13.1' (#102) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #102
2025-11-15 16:49:31 +00:00
6ccb00e86e Merge pull request 'Update Helm release immich to v1.0.6' (#103) from renovate/immich-1.x into fresh-start
Reviewed-on: #103
2025-11-15 16:49:17 +00:00
7b8fb8d8bb Merge pull request 'Update Helm release ingress-nginx to v4.14.0' (#105) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #105
2025-11-15 16:48:24 +00:00
0ae3181267 Merge pull request 'Update renovate/renovate Docker tag to v42' (#106) from renovate/renovate-renovate-42.x into fresh-start
Reviewed-on: #106
2025-11-15 16:47:38 +00:00
c0d83249b9 Update renovate/renovate Docker tag to v42 2025-11-15 00:00:31 +00:00
974d70a39e Update Helm release cilium to v1.18.4 2025-11-13 00:00:23 +00:00
4518fc674a Update Helm release openbao to v0.19.2 2025-11-07 00:00:23 +00:00
c3912af26b Update Helm release immich to v1.0.6 2025-11-06 00:00:36 +00:00
797b97496e Update Helm release ingress-nginx to v4.14.0 2025-11-04 00:00:49 +00:00
29457af188 add nas deployment 2025-11-03 02:31:02 +01:00
2a8e56824e Update registry.k8s.io/coredns/coredns Docker tag to v1.13.1 2025-10-28 00:00:30 +00:00
f71794de4d Update Helm release cloudnative-pg to v0.26.1 2025-10-24 00:00:24 +00:00
46 changed files with 1224 additions and 452 deletions

12
.envrc Normal file
View File

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
export DIRENV_WARN_TIMEOUT=20s
eval "$(devenv direnvrc)"
# `use devenv` supports the same options as the `devenv shell` command.
#
# To silence all output, use `--quiet`.
#
# Example usage: use devenv --quiet --impure --option services.postgres.enable:bool true
use devenv

12
.gitignore vendored
View File

@@ -1,2 +1,12 @@
secrets.yaml
talos/generated
talos/generated
# Devenv
.devenv*
devenv.local.nix
devenv.local.yaml
# direnv
.direnv
# pre-commit
.pre-commit-config.yaml

View File

@@ -1,7 +1,7 @@
{
"recommendations": [
"arrterian.nix-env-selector",
"jnoortheen.nix-ide",
"detachhead.basedpyright"
"detachhead.basedpyright",
"mkhl.direnv"
]
}

View File

@@ -1,13 +1,4 @@
{
"nixEnvSelector.nixFile": "${workspaceFolder}/shell.nix",
"terminal.integrated.profiles.linux": {
"Nix Shell": {
"path": "nix",
"args": ["develop"],
"icon": "terminal-linux"
}
},
"terminal.integrated.defaultProfile.linux": "Nix Shell",
"ansible.python.interpreterPath": "/bin/python",
"python.defaultInterpreterPath": "${env:PYTHON_BIN}"
}

View File

@@ -0,0 +1,49 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-config
namespace: openebs
spec:
capacity: 5Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-config
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-config
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-config
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
volumeName: frigate-config

View File

@@ -3,5 +3,7 @@ kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- config-pvc.yaml
- media-pvc.yaml
- release.yaml
- webrtc-svc.yaml

View File

@@ -0,0 +1,49 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-media
namespace: openebs
spec:
capacity: 500Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-media
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-media
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-media
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-media
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
volumeName: frigate-media

View File

@@ -36,6 +36,8 @@ spec:
cookie_secure: True
record:
expire_interval: 1440 # 24h
sync_recordings: True
enabled: True
retain:
days: 90

View File

@@ -2,6 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- release.yaml
- secret.yaml

View File

@@ -10,3 +10,7 @@ spec:
storage:
size: 10Gi
storageClass: mayastor-single-hdd
backup:
volumeSnapshot:
className: csi-mayastor-snapshotclass

View File

@@ -0,0 +1,32 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-postgresql-cluster-lvmhdd-1
namespace: openebs
spec:
capacity: 20Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-postgresql-cluster-lvmhdd-1
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: gitea-postgresql-cluster-lvmhdd-1
---
# PVCs are dynamically created by the Postgres operator

View File

@@ -17,7 +17,7 @@ spec:
chart:
spec:
chart: gitea
version: 12.4.0
version: 12.5.0
sourceRef:
kind: HelmRepository
name: gitea-charts

View File

@@ -19,7 +19,7 @@ spec:
chart:
spec:
chart: redis
version: 23.1.3
version: 24.1.3
sourceRef:
kind: HelmRepository
name: bitnami

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: immich
version: 1.0.2
version: 1.0.12
sourceRef:
kind: HelmRepository
name: secustor

View File

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- statefulset.yaml
- service.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: ispeak3

49
apps/ispeak3/pvc.yaml Normal file
View File

@@ -0,0 +1,49 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: ispeak3-ts3-data
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: ispeak3-ts3-data
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: ispeak3-ts3-data
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: ispeak3
name: ispeak3-ts3-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ispeak3-ts3-data
namespace: ispeak3
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
volumeName: ispeak3-ts3-data

20
apps/ispeak3/service.yaml Normal file
View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: teamspeak3
namespace: ispeak3
spec:
selector:
app: teamspeak3
ports:
- name: voice
protocol: UDP
port: 9987
targetPort: 9987
- name: filetransfer
protocol: TCP
port: 30033
targetPort: 30033
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilyPolicy: PreferDualStack

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: teamspeak3-server
namespace: ispeak3
spec:
serviceName: "teamspeak3"
replicas: 1
selector:
matchLabels:
app: teamspeak3
template:
metadata:
labels:
app: teamspeak3
spec:
containers:
- name: teamspeak3
image: teamspeak:3.13.7
ports:
- containerPort: 9987
name: voice
protocol: UDP
- containerPort: 10011
name: query
- containerPort: 30033
name: filetransfer
volumeMounts:
- name: ts3-data
mountPath: /var/ts3server/
volumes:
- name: ts3-data
persistentVolumeClaim:
claimName: ispeak3-ts3-data

View File

@@ -10,3 +10,4 @@ resources:
- immich
- nas
- searxng
- ispeak3

View File

@@ -8,92 +8,113 @@ spec:
interval: 24h
url: https://dynomite567.github.io/helm-charts/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: librechat
namespace: librechat
spec:
interval: 30m
chart:
spec:
chart: librechat
version: 1.9.1
sourceRef:
kind: HelmRepository
name: dynomite567-charts
values:
global:
librechat:
existingSecretName: librechat
librechat:
configEnv:
PLUGIN_MODELS: null
ALLOW_REGISTRATION: "false"
TRUST_PROXY: "1"
DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
SEARCH: "true"
existingSecretName: librechat
configYamlContent: |
version: 1.0.3
# apiVersion: helm.toolkit.fluxcd.io/v2
# kind: HelmRelease
# metadata:
# name: librechat
# namespace: librechat
# spec:
# interval: 30m
# chart:
# spec:
# chart: librechat
# version: 1.9.1
# sourceRef:
# kind: HelmRepository
# name: dynomite567-charts
# values:
# global:
# librechat:
# existingSecretName: librechat
# librechat:
# configEnv:
# PLUGIN_MODELS: null
# ALLOW_REGISTRATION: "false"
# TRUST_PROXY: "1"
# DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
# SEARCH: "true"
# existingSecretName: librechat
# configYamlContent: |
# version: 1.0.3
endpoints:
custom:
- name: "Llama.cpp"
apiKey: "llama"
baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
models:
default: [
"DeepSeek-R1-0528-Qwen3-8B-GGUF",
"Qwen3-8B-GGUF",
"Qwen3-8B-GGUF-no-thinking",
"gemma3n-e4b",
"gemma3-12b",
"gemma3-12b-q2",
"gemma3-12b-novision",
"gemma3-4b",
"gemma3-4b-novision",
"Qwen3-4B-Thinking-2507",
"Qwen3-4B-Thinking-2507-long-ctx",
"Qwen2.5-VL-7B-Instruct-GGUF",
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L"
]
titleConvo: true
titleModel: "gemma3-4b-novision"
summarize: false
summaryModel: "gemma3-4b-novision"
forcePrompt: false
modelDisplayLabel: "Llama.cpp"
imageVolume:
enabled: true
size: 10G
accessModes: ReadWriteOnce
storageClassName: mayastor-single-hdd
ingress:
enabled: true
className: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
hosts:
- host: librechat.lumpiasty.xyz
paths:
- path: /
pathType: ImplementationSpecific
tls:
- hosts:
- librechat.lumpiasty.xyz
secretName: librechat-ingress
# endpoints:
# custom:
# - name: "Llama.cpp"
# apiKey: "llama"
# baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
# models:
# default: [
# "DeepSeek-R1-0528-Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF-no-thinking",
# "gemma3n-e4b",
# "gemma3-12b",
# "gemma3-12b-q2",
# "gemma3-12b-novision",
# "gemma3-4b",
# "gemma3-4b-novision",
# "Qwen3-4B-Thinking-2507",
# "Qwen3-4B-Thinking-2507-long-ctx",
# "Qwen2.5-VL-7B-Instruct-GGUF",
# "Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
# "Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L",
# "Qwen3-VL-2B-Instruct-GGUF",
# "Qwen3-VL-2B-Instruct-GGUF-unslothish",
# "Qwen3-VL-2B-Thinking-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF-unslothish",
# "Qwen3-VL-4B-Thinking-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF-unslothish",
# "Qwen3-VL-8B-Thinking-GGUF",
# "Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF",
# "Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF"
# ]
# titleConvo: true
# titleModel: "gemma3-4b-novision"
# summarize: false
# summaryModel: "gemma3-4b-novision"
# forcePrompt: false
# modelDisplayLabel: "Llama.cpp"
mongodb:
persistence:
storageClass: mayastor-single-hdd
# # ✨ IMPORTANT: let llama-swap/llama-server own all these
# dropParams:
# - "temperature"
# - "top_p"
# - "top_k"
# - "presence_penalty"
# - "frequency_penalty"
# - "stop"
# - "max_tokens"
# imageVolume:
# enabled: true
# size: 10G
# accessModes: ReadWriteOnce
# storageClassName: mayastor-single-hdd
# ingress:
# enabled: true
# className: nginx-ingress
# annotations:
# cert-manager.io/cluster-issuer: letsencrypt
# nginx.ingress.kubernetes.io/proxy-body-size: "0"
# nginx.ingress.kubernetes.io/proxy-buffering: "false"
# nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
# hosts:
# - host: librechat.lumpiasty.xyz
# paths:
# - path: /
# pathType: ImplementationSpecific
# tls:
# - hosts:
# - librechat.lumpiasty.xyz
# secretName: librechat-ingress
meilisearch:
persistence:
storageClass: mayastor-single-hdd
auth:
existingMasterKeySecret: librechat
# mongodb:
# persistence:
# storageClass: mayastor-single-hdd
# meilisearch:
# persistence:
# storageClass: mayastor-single-hdd
# auth:
# existingMasterKeySecret: librechat

View File

@@ -5,212 +5,464 @@ models:
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF-no-thinking":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--jinja --chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--jinja
--chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
"gemma3n-e4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"gemma3-12b-q2":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 262144
--predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn auto
--cache-type-k q8_0
--cache-type-v q8_0
--port ${PORT}
"Qwen3-4B-Instruct-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Instruct-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 262144
--predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn auto
--cache-type-k q8_0
--cache-type-v q8_0
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-7B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
-ngl 37 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Thinking-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Thinking-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Thinking-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf noctrex/Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF:Q6_K
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf noctrex/Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF:Q6_K
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}

View File

@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v166-vulkan-b6795
image: ghcr.io/mostlygeek/llama-swap:v172-vulkan-b7062
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap
@@ -29,7 +29,7 @@ spec:
protocol: TCP
volumeMounts:
- name: models
mountPath: /root/.cache
mountPath: /app/.cache
- mountPath: /dev/kfd
name: kfd
- mountPath: /dev/dri

View File

@@ -1,14 +1,28 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nas-sshd-config
name: nas-sftp-config
namespace: nas
data:
00-chroot.conf: |
Subsystem sftp internal-sftp
Match User nas
ChrootDirectory /config
ForceCommand internal-sftp -d /data
AllowTcpForwarding no
X11Forwarding no
PermitTunnel no
sftp.json: |
{
"Global": {
"Chroot": {
"Directory": "%h",
"StartPath": "data"
},
"Directories": [
"data"
]
},
"Users": [
{
"Username": "nas",
"UID": 1000,
"GID": 1000,
"PublicKeys": [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999"
]
}
]
}

View File

@@ -1,84 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nas-ssh
name: nas-sftp
namespace: nas
spec:
replicas: 1
selector:
matchLabels:
app: nas-ssh
app: nas-sftp
template:
metadata:
labels:
app: nas-ssh
app: nas-sftp
spec:
securityContext:
fsGroup: 1000
initContainers:
- name: prepare-config
image: alpine:3.20.3
- name: prepare-home
image: alpine:3.23.3
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
set -euo pipefail
chown root:root /config
chmod 755 /config
mkdir -p /config/data
chown 1000:1000 /config/data
chmod 750 /config/data
mkdir -p /config/ssh_host_keys
chown root:root /config/ssh_host_keys
chmod 700 /config/ssh_host_keys
for key in /config/ssh_host_keys/*; do
[ -f "$key" ] || continue
chown root:root "$key"
chmod 600 "$key"
done
mkdir -p /config/sshd/sshd_config.d
cp /defaults/00-chroot.conf /config/sshd/sshd_config.d/00-chroot.conf
chown root:root /config/sshd/sshd_config.d/00-chroot.conf
chmod 644 /config/sshd/sshd_config.d/00-chroot.conf
mkdir -p /volume/sftp-root
chown root:root /volume/sftp-root
chmod 755 /volume/sftp-root
mkdir -p /volume/sftp-root/data
chown 1000:1000 /volume/sftp-root/data
chmod 750 /volume/sftp-root/data
mkdir -p /volume/host-keys
chown root:root /volume/host-keys
chmod 700 /volume/host-keys
volumeMounts:
- name: data
mountPath: /config
- name: sshd-config
mountPath: /defaults/00-chroot.conf
subPath: 00-chroot.conf
readOnly: true
- name: home
mountPath: /volume
containers:
- name: ssh
image: lscr.io/linuxserver/openssh-server:version-10.0_p1-r9
- name: sftp
image: docker.io/emberstack/sftp:build-5.1.72
imagePullPolicy: IfNotPresent
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Etc/UTC
- name: USER_NAME
value: nas
- name: SUDO_ACCESS
value: "false"
- name: PASSWORD_ACCESS
value: "false"
- name: LOG_STDOUT
value: "true"
- name: PUBLIC_KEY
valueFrom:
secretKeyRef:
name: nas-ssh-authorized-keys
key: public_key
ports:
- containerPort: 2222
name: ssh
- containerPort: 22
name: sftp
protocol: TCP
volumeMounts:
- name: data
mountPath: /config
- name: config
mountPath: /app/config/sftp.json
subPath: sftp.json
readOnly: true
- name: home
mountPath: /home/nas
subPath: sftp-root
- name: home
mountPath: /etc/ssh/keys
subPath: host-keys
resources:
requests:
cpu: 50m
@@ -86,9 +60,9 @@ spec:
limits:
memory: 512Mi
volumes:
- name: data
- name: home
persistentVolumeClaim:
claimName: nas-data
- name: sshd-config
claimName: nas-data-lvm-hdd
- name: config
configMap:
name: nas-sshd-config
name: nas-sftp-config

View File

@@ -2,7 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- configmap.yaml
- pvc.yaml
- deployment.yaml

View File

@@ -1,12 +1,49 @@
apiVersion: v1
kind: PersistentVolumeClaim
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
name: nas-data
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: nas-data-lvm-hdd
namespace: openebs
spec:
capacity: 4Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
spec:
capacity:
storage: 4Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: nas-data-lvm-hdd
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: nas
name: nas-data-lvm-hdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
namespace: nas
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
storageClassName: mayastor-single-hdd
requests:
storage: 4Gi
volumeName: nas-data-lvm-hdd

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: nas-ssh-authorized-keys
namespace: nas
type: Opaque
stringData:
public_key: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999

View File

@@ -1,15 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: nas-ssh
name: nas-sftp
namespace: nas
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster
ports:
- name: ssh
- name: sftp
port: 22
targetPort: 2222
targetPort: 22
protocol: TCP
selector:
app: nas-ssh
app: nas-sftp

View File

@@ -15,7 +15,7 @@ spec:
- name: renovate
# Update this to the latest available and then enable Renovate on
# the manifest
image: renovate/renovate:41.169.1-full
image: renovate/renovate:43.8.1-full
envFrom:
- secretRef:
name: renovate-gitea-token

View File

@@ -1,17 +1,34 @@
{
"nodes": {
"flake-compat": {
"devenv": {
"locked": {
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"revCount": 69,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.1.0/01948eb7-9cba-704f-bbf3-3fa956735b52/source.tar.gz"
"dir": "src/modules",
"lastModified": 1769881431,
"owner": "cachix",
"repo": "devenv",
"rev": "72d5e66e2dd5112766ef4c9565872b51094b542d",
"type": "github"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/edolstra/flake-compat/1.1.0.tar.gz"
"dir": "src/modules",
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1767039857,
"owner": "NixOS",
"repo": "flake-compat",
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
@@ -20,7 +37,6 @@
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
@@ -32,6 +48,47 @@
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1769069492,
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "a1ef738813b15cf8ec759bdff5761b027e3e1d23",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1762808025,
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "cb5e3fdca1de58ccbc3ef53de65bd372b48f567c",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"krew2nix": {
"inputs": {
"flake-utils": "flake-utils",
@@ -42,11 +99,10 @@
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1751765453,
"narHash": "sha256-tgo3BwFM2UUYQz6dVARztbj5AjKfz4exlPxnKLS/ZRg=",
"lastModified": 1769904483,
"owner": "a1994sc",
"repo": "krew2nix",
"rev": "11f66e65a0146645388eeab68b6212de0b732ed9",
"rev": "17d6ad3375899bd3f7d4d298481536155f3ec13c",
"type": "github"
},
"original": {
@@ -57,11 +113,10 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1751637120,
"narHash": "sha256-xVNy/XopSfIG9c46nRmPaKfH1Gn/56vQ8++xWA8itO4=",
"lastModified": 1769461804,
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5c724ed1388e53cc231ed98330a60eb2f7be4be3",
"rev": "bfc1b8a4574108ceef22f02bafcf6611380c100d",
"type": "github"
},
"original": {
@@ -73,15 +128,18 @@
},
"root": {
"inputs": {
"flake-compat": "flake-compat",
"devenv": "devenv",
"git-hooks": "git-hooks",
"krew2nix": "krew2nix",
"nixpkgs": "nixpkgs"
"nixpkgs": "nixpkgs",
"pre-commit-hooks": [
"git-hooks"
]
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
@@ -96,7 +154,6 @@
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
@@ -116,11 +173,10 @@
]
},
"locked": {
"lastModified": 1750931469,
"narHash": "sha256-0IEdQB1nS+uViQw4k3VGUXntjkDp7aAlqcxdewb/hAc=",
"lastModified": 1769691507,
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "ac8e6f32e11e9c7f153823abc3ab007f2a65d3e1",
"rev": "28b19c5844cc6e2257801d43f2772a4b4c050a1b",
"type": "github"
},
"original": {

59
devenv.nix Normal file
View File

@@ -0,0 +1,59 @@
# devenv module: development shell for this cluster repo (tooling, env vars,
# scripts). Replaces the former flake.nix/shell.nix pair.
{ pkgs, lib, config, inputs, ... }:
let
  # Python with hvac package (HashiCorp Vault / OpenBao API client),
  # used by the helper scripts under utils/ via $PYTHON_BIN.
  python = pkgs.python313.withPackages (python-pkgs: with python-pkgs; [
    hvac
  ]);
in
{
  # Overlays - apply krew2nix to get kubectl with krew support
  overlays = [
    inputs.krew2nix.overlay
  ];
  # Environment variables
  env = {
    GREET = "devenv";
    # talosctl config generated into the repo; resolved relative to the devenv root
    TALOSCONFIG = "${config.devenv.root}/talos/generated/talosconfig";
    EDITOR = "vim";
    # Backblaze B2 bucket used as the restic backup target
    # (credentials/RESTIC_PASSWORD are intentionally NOT set here).
    RESTIC_REPOSITORY = "s3:https://s3.eu-central-003.backblazeb2.com/lumpiasty-backups";
    VAULT_ADDR = "https://openbao.lumpiasty.xyz:8200";
    # Expose the repo's utils/ scripts and coreutils on PATH.
    # NOTE(review): this REPLACES rather than appends to PATH (the old
    # shell.nix appended) — confirm this is intentional.
    PATH = "${config.devenv.root}/utils:${pkgs.coreutils}/bin";
    # Interpreter path consumed by scripts that shell out to Python
    PYTHON_BIN = "${python}/bin/python";
  };
  # Packages
  packages = with pkgs; [
    python
    vim gnumake
    talosctl cilium-cli
    kubectx k9s kubernetes-helm
    # kubectl wrapped by the krew2nix overlay with the listed krew plugins
    (kubectl.withKrewPlugins (plugins: with plugins; [
      mayastor
      openebs
    ]))
    ansible
    fluxcd
    restic
    openbao
    pv-migrate
  ];
  # Scripts
  scripts.hello.exec = ''
    echo hello from $GREET
  '';
  # Shell hooks
  enterShell = ''
    source ${pkgs.bash-completion}/share/bash-completion/bash_completion
    echo "Environment ready!"
  '';
  # Tests
  enterTest = ''
    echo "Running tests"
    git --version | grep --color=auto "${pkgs.git.version}"
  '';
}

20
devenv.yaml Normal file
View File

@@ -0,0 +1,20 @@
# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json
inputs:
nixpkgs:
url: github:NixOS/nixpkgs/nixos-unstable
krew2nix:
url: github:a1994sc/krew2nix
inputs:
nixpkgs:
follows: nixpkgs
# If you're using non-OSS software, you can set allowUnfree to true.
# allowUnfree: true
# If you're willing to use a package that's vulnerable
# permittedInsecurePackages:
# - "openssl-1.1.1w"
# If you have more than one devenv you can merge them
#imports:
# - ./backend

View File

@@ -1,62 +0,0 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
# Only to ease updating flake.lock, flake-compat is used by shell.nix
flake-compat.url = https://flakehub.com/f/edolstra/flake-compat/1.1.0.tar.gz;
# Allows us to install krew plugins
krew2nix.url = "github:a1994sc/krew2nix";
krew2nix.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = { self, nixpkgs, krew2nix, ... }: let
system = "x86_64-linux";
in {
devShells."${system}".default =
let
pkgs = import nixpkgs {
overlays = [ krew2nix.overlay ];
inherit system;
};
python = (pkgs.python313.withPackages (python-pkgs: with python-pkgs; [
hvac
]));
in
pkgs.mkShell {
packages = with pkgs; [
python
vim gnumake
talosctl cilium-cli
kubectx k9s kubernetes-helm
(kubectl.withKrewPlugins (plugins: with plugins; [
mayastor
openebs
]))
ansible
fluxcd
restic
openbao
];
shellHook = ''
# Get completions working
source ${pkgs.bash-completion}/share/bash-completion/bash_completion
export TALOSCONFIG=$(pwd)/talos/generated/talosconfig
export EDITOR=vim
export RESTIC_REPOSITORY=s3:https://s3.eu-central-003.backblazeb2.com/lumpiasty-backups
# export AWS_ACCESS_KEY_ID=?
# export AWS_SECRET_ACCESS_KEY=?
# export RESTIC_PASSWORD=?
export VAULT_ADDR=https://openbao.lumpiasty.xyz:8200
# Add scripts from utils subdir
export PATH="$PATH:$(pwd)/utils"
export PYTHON_BIN=${python}/bin/python
'';
};
};
}

View File

@@ -0,0 +1,12 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: hdd-lvmpv
parameters:
storage: "lvm"
volgroup: "openebs-hdd"
fsType: "btrfs"
shared: "yes"
provisioner: local.csi.openebs.io
allowVolumeExpansion: true
volumeBindingMode: Immediate

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cert-manager
version: v1.19.1
version: v1.19.3
sourceRef:
kind: HelmRepository
name: cert-manager

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cilium
version: 1.18.2
version: 1.18.6
sourceRef:
kind: HelmRepository
name: cilium

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cloudnative-pg
version: 0.26.0
version: 0.27.0
sourceRef:
kind: HelmRepository
name: cnpg

View File

@@ -97,7 +97,7 @@ spec:
env:
- name: GOMEMLIMIT
value: 161MiB
image: registry.k8s.io/coredns/coredns:v1.13.0
image: registry.k8s.io/coredns/coredns:v1.14.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: ingress-nginx
version: 4.13.3
version: 4.14.3
sourceRef:
kind: HelmRepository
name: ingress-nginx

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openbao
version: 0.19.0
version: 0.25.0
sourceRef:
kind: HelmRepository
name: openbao

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openebs
version: 4.3.3
version: 4.4.0
sourceRef:
kind: HelmRepository
name: openebs
@@ -38,7 +38,7 @@ spec:
lvm-localpv:
crds:
lmvLocalPv:
enabled: false
enabled: true
mayastor:
csi:
@@ -124,7 +124,7 @@ spec:
engines:
local:
lvm:
enabled: false
enabled: true
zfs:
enabled: false
replicated:

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: vault-secrets-operator
version: 1.0.1
version: 1.2.0
sourceRef:
kind: HelmRepository
name: hashicorp

View File

@@ -20,6 +20,7 @@ resources:
- configs/loadbalancer-ippool.yaml
- configs/single-hdd-sc.yaml
- configs/single-ssd-sc.yaml
- configs/lvmpv-hdd-sc.yaml
- configs/mayastor-snapshotclass.yaml
- configs/openbao-cert.yaml
- configs/ovh-cert-manager-secret.yaml

View File

@@ -1,15 +0,0 @@
# Needed for Nix Environment Selector
# https://github.com/edolstra/flake-compat/
(import
(
let
lock = builtins.fromJSON (builtins.readFile ./flake.lock);
nodeName = lock.nodes.root.inputs.flake-compat;
in
fetchTarball {
url = lock.nodes.${nodeName}.locked.url;
sha256 = lock.nodes.${nodeName}.locked.narHash;
}
)
{ src = ./.; }
).shellNix

146
utils/kubectl-run-with-pvc.sh Executable file
View File

@@ -0,0 +1,146 @@
#!/usr/bin/env bash
# A utility script to run a kubectl pod with one or more PVCs mounted.
# Original: https://gist.github.com/yuanying/3aa7d59dcce65470804ab43def646ab6
# Modified to add help message, -n and -x options, and other improvements.
set -euo pipefail

IMAGE="gcr.io/google-containers/ubuntu-slim:0.14"
COMMAND="/bin/bash"
NAMESPACE=""
CONTEXT=""
# Random pod-name suffix. Keep only lowercase hex so the result is always a
# valid DNS-1123 label (the previous base64 step could emit '+', '/' or '=',
# which Kubernetes rejects in pod names).
SUFFIX=$(date +%s | shasum | tr -cd 'a-f0-9' | head -c 10)

# Print the help text to stdout and exit with the given status (default 1).
usage_exit() {
  cat <<EOF
kubectl-run-with-pvc - Run a temporary pod with PersistentVolumeClaims mounted

USAGE:
  $0 [-c command] [-i image] [-n namespace] [-x context] [-h] PVC [PVC ...]

DESCRIPTION:
  Creates an ephemeral Kubernetes pod that mounts one or more PersistentVolumeClaims (PVCs).
  Each PVC is mounted at /pvcs/<claimName>. The pod is automatically removed when you exit.

  Useful for inspecting, debugging, or manipulating data in PVCs without having to deploy
  a persistent pod or job.

OPTIONS:
  -i IMAGE
      Container image to use in the pod.
      Default: gcr.io/google-containers/ubuntu-slim:0.14

  -c COMMAND
      Command to execute in the container.
      Default: /bin/bash

  -n NAMESPACE
      Kubernetes namespace where the pod will be created.
      Default: current namespace (from kubectl config)

  -x CONTEXT
      kubectl context to use for this operation.
      Default: current context (from kubectl config)

  -h
      Display this help message and exit.

EXAMPLES:
  # Mount a single PVC and get an interactive shell
  $0 my-pvc

  # Mount multiple PVCs
  $0 data-pvc logs-pvc config-pvc

  # Use a specific namespace
  $0 -n my-namespace my-pvc

  # Use a different context and namespace
  $0 -x prod-cluster -n production my-pvc

  # Use Alpine Linux instead of Ubuntu
  $0 -i alpine:latest -c sh my-pvc

  # Run a command non-interactively
  $0 -c "ls -lh /pvcs/my-pvc" my-pvc

MOUNT PATHS:
  Each PVC is mounted to: /pvcs/<claimName>
  Example: If you mount 'database-pvc', it will be at /pvcs/database-pvc

NOTES:
  - Pod name is auto-generated: pvc-mounter-<random-suffix>
  - Pod is removed when you exit (--rm flag)
  - Uses hostNetwork: true for networking access
  - Requires kubectl configured and permissions to create pods

PREREQUISITES:
  - kubectl installed and configured
  - PVCs must already exist in the target namespace
  - User must have permission to create pods in the target namespace
EOF
  exit "${1:-1}"
}

while getopts i:c:n:x:h OPT; do
  case "$OPT" in
    i) IMAGE=$OPTARG ;;
    c) COMMAND=$OPTARG ;;
    n) NAMESPACE=$OPTARG ;;
    x) CONTEXT=$OPTARG ;;
    h) usage_exit 0 ;;   # explicitly-requested help is not an error
    \?) usage_exit 1 ;;
  esac
done
shift $((OPTIND - 1))

# Require at least one PVC
if [ $# -eq 0 ]; then
  echo "Error: At least one PVC name is required" 1>&2
  usage_exit 1
fi

# Build the JSON fragments for the volumeMounts and volumes arrays, one entry
# per PVC name. PVC names are DNS-1123 labels, so no JSON escaping is needed.
VOL_MOUNTS=""
VOLS=""
COMMA=""
for pvc in "$@"; do
  VOL_MOUNTS="${VOL_MOUNTS}${COMMA}{\"name\": \"${pvc}\",\"mountPath\": \"/pvcs/${pvc}\"}"
  VOLS="${VOLS}${COMMA}{\"name\": \"${pvc}\",\"persistentVolumeClaim\": {\"claimName\": \"${pvc}\"}}"
  COMMA=","
done

# Assemble kubectl and its global flags as an array so values containing
# spaces survive intact (no unquoted string re-splitting).
KUBECTL_CMD=(kubectl)
if [ -n "$CONTEXT" ]; then
  KUBECTL_CMD+=("--context=$CONTEXT")
fi
if [ -n "$NAMESPACE" ]; then
  KUBECTL_CMD+=("--namespace=$NAMESPACE")
fi

# COMMAND is intentionally word-split (e.g. -c "ls -lh /pvcs/x" must become
# separate argv entries), but via read -ra instead of an unquoted expansion
# so glob characters in the command are not expanded against the local cwd.
read -r -a CMD_WORDS <<<"$COMMAND"

"${KUBECTL_CMD[@]}" run -it --rm --restart=Never --image="$IMAGE" "pvc-mounter-${SUFFIX}" --overrides "
{
  \"spec\": {
    \"containers\":[
      {
        \"args\": [\"${COMMAND}\"],
        \"stdin\": true,
        \"tty\": true,
        \"name\": \"pvc\",
        \"image\": \"${IMAGE}\",
        \"volumeMounts\": [
          ${VOL_MOUNTS}
        ]
      }
    ],
    \"volumes\": [
      ${VOLS}
    ]
  }
}
" -- "${CMD_WORDS[@]}"