2 Commits

Author SHA1 Message Date
223642ca21 Update renovate/renovate Docker tag to v41.169.1 2025-11-03 00:00:29 +00:00
3a57ef6953 add nas deployment 2025-11-03 00:56:36 +01:00
46 changed files with 449 additions and 1221 deletions

12
.envrc
View File

@@ -1,12 +0,0 @@
#!/usr/bin/env bash
export DIRENV_WARN_TIMEOUT=20s
eval "$(devenv direnvrc)"
# `use devenv` supports the same options as the `devenv shell` command.
#
# To silence all output, use `--quiet`.
#
# Example usage: use devenv --quiet --impure --option services.postgres.enable:bool true
use devenv

12
.gitignore vendored
View File

@@ -1,12 +1,2 @@
secrets.yaml
talos/generated
# Devenv
.devenv*
devenv.local.nix
devenv.local.yaml
# direnv
.direnv
# pre-commit
.pre-commit-config.yaml
talos/generated

View File

@@ -1,7 +1,7 @@
{
"recommendations": [
"arrterian.nix-env-selector",
"jnoortheen.nix-ide",
"detachhead.basedpyright",
"mkhl.direnv"
"detachhead.basedpyright"
]
}

View File

@@ -1,4 +1,13 @@
{
"nixEnvSelector.nixFile": "${workspaceFolder}/shell.nix",
"terminal.integrated.profiles.linux": {
"Nix Shell": {
"path": "nix",
"args": ["develop"],
"icon": "terminal-linux"
}
},
"terminal.integrated.defaultProfile.linux": "Nix Shell",
"ansible.python.interpreterPath": "/bin/python",
"python.defaultInterpreterPath": "${env:PYTHON_BIN}"
}

View File

@@ -1,49 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-config
namespace: openebs
spec:
capacity: 5Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-config
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-config
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-config
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
volumeName: frigate-config

View File

@@ -3,7 +3,5 @@ kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- config-pvc.yaml
- media-pvc.yaml
- release.yaml
- webrtc-svc.yaml

View File

@@ -1,49 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-media
namespace: openebs
spec:
capacity: 500Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-media
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-media
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-media
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-media
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
volumeName: frigate-media

View File

@@ -36,8 +36,6 @@ spec:
cookie_secure: True
record:
expire_interval: 1440 # 24h
sync_recordings: True
enabled: True
retain:
days: 90

View File

@@ -2,7 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- release.yaml
- secret.yaml

View File

@@ -10,7 +10,3 @@ spec:
storage:
size: 10Gi
storageClass: mayastor-single-hdd
backup:
volumeSnapshot:
className: csi-mayastor-snapshotclass

View File

@@ -1,32 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-postgresql-cluster-lvmhdd-1
namespace: openebs
spec:
capacity: 20Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-postgresql-cluster-lvmhdd-1
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: gitea-postgresql-cluster-lvmhdd-1
---
# PVCs are dynamically created by the Postgres operator

View File

@@ -17,7 +17,7 @@ spec:
chart:
spec:
chart: gitea
version: 12.5.0
version: 12.4.0
sourceRef:
kind: HelmRepository
name: gitea-charts

View File

@@ -19,7 +19,7 @@ spec:
chart:
spec:
chart: redis
version: 24.1.3
version: 23.1.3
sourceRef:
kind: HelmRepository
name: bitnami

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: immich
version: 1.0.12
version: 1.0.2
sourceRef:
kind: HelmRepository
name: secustor

View File

@@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- statefulset.yaml
- service.yaml

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: ispeak3

View File

@@ -1,49 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: ispeak3-ts3-data
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: ispeak3-ts3-data
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: ispeak3-ts3-data
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: ispeak3
name: ispeak3-ts3-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ispeak3-ts3-data
namespace: ispeak3
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
volumeName: ispeak3-ts3-data

View File

@@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: teamspeak3
namespace: ispeak3
spec:
selector:
app: teamspeak3
ports:
- name: voice
protocol: UDP
port: 9987
targetPort: 9987
- name: filetransfer
protocol: TCP
port: 30033
targetPort: 30033
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilyPolicy: PreferDualStack

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: teamspeak3-server
namespace: ispeak3
spec:
serviceName: "teamspeak3"
replicas: 1
selector:
matchLabels:
app: teamspeak3
template:
metadata:
labels:
app: teamspeak3
spec:
containers:
- name: teamspeak3
image: teamspeak:3.13.7
ports:
- containerPort: 9987
name: voice
protocol: UDP
- containerPort: 10011
name: query
- containerPort: 30033
name: filetransfer
volumeMounts:
- name: ts3-data
mountPath: /var/ts3server/
volumes:
- name: ts3-data
persistentVolumeClaim:
claimName: ispeak3-ts3-data

View File

@@ -10,4 +10,3 @@ resources:
- immich
- nas
- searxng
- ispeak3

View File

@@ -8,113 +8,92 @@ spec:
interval: 24h
url: https://dynomite567.github.io/helm-charts/
---
# apiVersion: helm.toolkit.fluxcd.io/v2
# kind: HelmRelease
# metadata:
# name: librechat
# namespace: librechat
# spec:
# interval: 30m
# chart:
# spec:
# chart: librechat
# version: 1.9.1
# sourceRef:
# kind: HelmRepository
# name: dynomite567-charts
# values:
# global:
# librechat:
# existingSecretName: librechat
# librechat:
# configEnv:
# PLUGIN_MODELS: null
# ALLOW_REGISTRATION: "false"
# TRUST_PROXY: "1"
# DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
# SEARCH: "true"
# existingSecretName: librechat
# configYamlContent: |
# version: 1.0.3
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: librechat
namespace: librechat
spec:
interval: 30m
chart:
spec:
chart: librechat
version: 1.9.1
sourceRef:
kind: HelmRepository
name: dynomite567-charts
values:
global:
librechat:
existingSecretName: librechat
librechat:
configEnv:
PLUGIN_MODELS: null
ALLOW_REGISTRATION: "false"
TRUST_PROXY: "1"
DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
SEARCH: "true"
existingSecretName: librechat
configYamlContent: |
version: 1.0.3
# endpoints:
# custom:
# - name: "Llama.cpp"
# apiKey: "llama"
# baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
# models:
# default: [
# "DeepSeek-R1-0528-Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF-no-thinking",
# "gemma3n-e4b",
# "gemma3-12b",
# "gemma3-12b-q2",
# "gemma3-12b-novision",
# "gemma3-4b",
# "gemma3-4b-novision",
# "Qwen3-4B-Thinking-2507",
# "Qwen3-4B-Thinking-2507-long-ctx",
# "Qwen2.5-VL-7B-Instruct-GGUF",
# "Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
# "Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L",
# "Qwen3-VL-2B-Instruct-GGUF",
# "Qwen3-VL-2B-Instruct-GGUF-unslothish",
# "Qwen3-VL-2B-Thinking-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF-unslothish",
# "Qwen3-VL-4B-Thinking-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF-unslothish",
# "Qwen3-VL-8B-Thinking-GGUF",
# "Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF",
# "Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF"
# ]
# titleConvo: true
# titleModel: "gemma3-4b-novision"
# summarize: false
# summaryModel: "gemma3-4b-novision"
# forcePrompt: false
# modelDisplayLabel: "Llama.cpp"
endpoints:
custom:
- name: "Llama.cpp"
apiKey: "llama"
baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
models:
default: [
"DeepSeek-R1-0528-Qwen3-8B-GGUF",
"Qwen3-8B-GGUF",
"Qwen3-8B-GGUF-no-thinking",
"gemma3n-e4b",
"gemma3-12b",
"gemma3-12b-q2",
"gemma3-12b-novision",
"gemma3-4b",
"gemma3-4b-novision",
"Qwen3-4B-Thinking-2507",
"Qwen3-4B-Thinking-2507-long-ctx",
"Qwen2.5-VL-7B-Instruct-GGUF",
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L"
]
titleConvo: true
titleModel: "gemma3-4b-novision"
summarize: false
summaryModel: "gemma3-4b-novision"
forcePrompt: false
modelDisplayLabel: "Llama.cpp"
imageVolume:
enabled: true
size: 10G
accessModes: ReadWriteOnce
storageClassName: mayastor-single-hdd
ingress:
enabled: true
className: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
hosts:
- host: librechat.lumpiasty.xyz
paths:
- path: /
pathType: ImplementationSpecific
tls:
- hosts:
- librechat.lumpiasty.xyz
secretName: librechat-ingress
# # ✨ IMPORTANT: let llama-swap/llama-server own all these
# dropParams:
# - "temperature"
# - "top_p"
# - "top_k"
# - "presence_penalty"
# - "frequency_penalty"
# - "stop"
# - "max_tokens"
# imageVolume:
# enabled: true
# size: 10G
# accessModes: ReadWriteOnce
# storageClassName: mayastor-single-hdd
# ingress:
# enabled: true
# className: nginx-ingress
# annotations:
# cert-manager.io/cluster-issuer: letsencrypt
# nginx.ingress.kubernetes.io/proxy-body-size: "0"
# nginx.ingress.kubernetes.io/proxy-buffering: "false"
# nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
# hosts:
# - host: librechat.lumpiasty.xyz
# paths:
# - path: /
# pathType: ImplementationSpecific
# tls:
# - hosts:
# - librechat.lumpiasty.xyz
# secretName: librechat-ingress
mongodb:
persistence:
storageClass: mayastor-single-hdd
# mongodb:
# persistence:
# storageClass: mayastor-single-hdd
# meilisearch:
# persistence:
# storageClass: mayastor-single-hdd
# auth:
# existingMasterKeySecret: librechat
meilisearch:
persistence:
storageClass: mayastor-single-hdd
auth:
existingMasterKeySecret: librechat

View File

@@ -5,464 +5,212 @@ models:
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--no-warmup
--port ${PORT}
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF-no-thinking":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--jinja
--chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--jinja --chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
"gemma3n-e4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"gemma3-12b-q2":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 262144
--predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn auto
--cache-type-k q8_0
--cache-type-v q8_0
--port ${PORT}
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen3-4B-Instruct-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Instruct-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 262144
--predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn auto
--cache-type-k q8_0
--cache-type-v q8_0
--port ${PORT}
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
--n-gpu-layers 99
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-7B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 37
--ctx-size 16384
--predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Instruct-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Instruct-GGUF-unslothish":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Instruct-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.8
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.6
--no-warmup
--port ${PORT}
"Qwen3-VL-2B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-2B-Thinking-GGUF:Q8_0
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Qwen3-VL-4B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-4B-Thinking-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Qwen3-VL-8B-Thinking-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf Qwen/Qwen3-VL-8B-Thinking-GGUF:Q4_K_M
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--top-p 0.95
--top-k 20
--temp 1.0
--min-p 0.0
--repeat-penalty 1.0
--presence-penalty 0.0
--no-warmup
--port ${PORT}
"Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf noctrex/Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF:Q6_K
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
"Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf noctrex/Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF:Q6_K
--n-gpu-layers 99
--ctx-size 12288
--predict 4096
--flash-attn auto
--jinja
--temp 0.7
--top-p 0.85
--top-k 20
--min-p 0.05
--repeat-penalty 1.15
--frequency-penalty 0.5
--presence-penalty 0.4
--no-warmup
--port ${PORT}
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
-ngl 37 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}

View File

@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v172-vulkan-b7062
image: ghcr.io/mostlygeek/llama-swap:v166-vulkan-b6795
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap
@@ -29,7 +29,7 @@ spec:
protocol: TCP
volumeMounts:
- name: models
mountPath: /app/.cache
mountPath: /root/.cache
- mountPath: /dev/kfd
name: kfd
- mountPath: /dev/dri

View File

@@ -1,28 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nas-sftp-config
name: nas-sshd-config
namespace: nas
data:
sftp.json: |
{
"Global": {
"Chroot": {
"Directory": "%h",
"StartPath": "data"
},
"Directories": [
"data"
]
},
"Users": [
{
"Username": "nas",
"UID": 1000,
"GID": 1000,
"PublicKeys": [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999"
]
}
]
}
00-chroot.conf: |
Subsystem sftp internal-sftp
Match User nas
ChrootDirectory /config
ForceCommand internal-sftp -d /data
AllowTcpForwarding no
X11Forwarding no
PermitTunnel no

View File

@@ -1,58 +1,84 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nas-sftp
name: nas-ssh
namespace: nas
spec:
replicas: 1
selector:
matchLabels:
app: nas-sftp
app: nas-ssh
template:
metadata:
labels:
app: nas-sftp
app: nas-ssh
spec:
securityContext:
fsGroup: 1000
initContainers:
- name: prepare-home
image: alpine:3.23.3
- name: prepare-config
image: alpine:3.20.3
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
set -euo pipefail
mkdir -p /volume/sftp-root
chown root:root /volume/sftp-root
chmod 755 /volume/sftp-root
mkdir -p /volume/sftp-root/data
chown 1000:1000 /volume/sftp-root/data
chmod 750 /volume/sftp-root/data
mkdir -p /volume/host-keys
chown root:root /volume/host-keys
chmod 700 /volume/host-keys
chown root:root /config
chmod 755 /config
mkdir -p /config/data
chown 1000:1000 /config/data
chmod 750 /config/data
mkdir -p /config/ssh_host_keys
chown root:root /config/ssh_host_keys
chmod 700 /config/ssh_host_keys
for key in /config/ssh_host_keys/*; do
[ -f "$key" ] || continue
chown root:root "$key"
chmod 600 "$key"
done
mkdir -p /config/sshd/sshd_config.d
cp /defaults/00-chroot.conf /config/sshd/sshd_config.d/00-chroot.conf
chown root:root /config/sshd/sshd_config.d/00-chroot.conf
chmod 644 /config/sshd/sshd_config.d/00-chroot.conf
volumeMounts:
- name: home
mountPath: /volume
- name: data
mountPath: /config
- name: sshd-config
mountPath: /defaults/00-chroot.conf
subPath: 00-chroot.conf
readOnly: true
containers:
- name: sftp
image: docker.io/emberstack/sftp:build-5.1.72
- name: ssh
image: lscr.io/linuxserver/openssh-server:version-10.0_p1-r9
imagePullPolicy: IfNotPresent
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Etc/UTC
- name: USER_NAME
value: nas
- name: SUDO_ACCESS
value: "false"
- name: PASSWORD_ACCESS
value: "false"
- name: LOG_STDOUT
value: "true"
- name: PUBLIC_KEY
valueFrom:
secretKeyRef:
name: nas-ssh-authorized-keys
key: public_key
ports:
- containerPort: 22
name: sftp
- containerPort: 2222
name: ssh
protocol: TCP
volumeMounts:
- name: config
mountPath: /app/config/sftp.json
subPath: sftp.json
readOnly: true
- name: home
mountPath: /home/nas
subPath: sftp-root
- name: home
mountPath: /etc/ssh/keys
subPath: host-keys
- name: data
mountPath: /config
resources:
requests:
cpu: 50m
@@ -60,9 +86,9 @@ spec:
limits:
memory: 512Mi
volumes:
- name: home
- name: data
persistentVolumeClaim:
claimName: nas-data-lvm-hdd
- name: config
claimName: nas-data
- name: sshd-config
configMap:
name: nas-sftp-config
name: nas-sshd-config

View File

@@ -2,6 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- configmap.yaml
- pvc.yaml
- deployment.yaml

View File

@@ -1,49 +1,12 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: nas-data-lvm-hdd
namespace: openebs
spec:
capacity: 4Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
spec:
capacity:
storage: 4Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: nas-data-lvm-hdd
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: nas
name: nas-data-lvm-hdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
name: nas-data
namespace: nas
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
volumeName: nas-data-lvm-hdd
requests:
storage: 500Gi
storageClassName: mayastor-single-hdd

9
apps/nas/secret.yaml Normal file
View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: nas-ssh-authorized-keys
namespace: nas
type: Opaque
stringData:
public_key: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999

View File

@@ -1,15 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: nas-sftp
name: nas-ssh
namespace: nas
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster
ports:
- name: sftp
- name: ssh
port: 22
targetPort: 22
targetPort: 2222
protocol: TCP
selector:
app: nas-sftp
app: nas-ssh

View File

@@ -15,7 +15,7 @@ spec:
- name: renovate
# Update this to the latest available and then enable Renovate on
# the manifest
image: renovate/renovate:43.4.0-full
image: renovate/renovate:41.169.1-full
envFrom:
- secretRef:
name: renovate-gitea-token

View File

@@ -1,59 +0,0 @@
{ pkgs, lib, config, inputs, ... }:
let
# Python with hvac package
python = pkgs.python313.withPackages (python-pkgs: with python-pkgs; [
hvac
]);
in
{
# Overlays - apply krew2nix to get kubectl with krew support
overlays = [
inputs.krew2nix.overlay
];
# Environment variables
env = {
GREET = "devenv";
TALOSCONFIG = "${config.devenv.root}/talos/generated/talosconfig";
EDITOR = "vim";
RESTIC_REPOSITORY = "s3:https://s3.eu-central-003.backblazeb2.com/lumpiasty-backups";
VAULT_ADDR = "https://openbao.lumpiasty.xyz:8200";
PATH = "${config.devenv.root}/utils:${pkgs.coreutils}/bin";
PYTHON_BIN = "${python}/bin/python";
};
# Packages
packages = with pkgs; [
python
vim gnumake
talosctl cilium-cli
kubectx k9s kubernetes-helm
(kubectl.withKrewPlugins (plugins: with plugins; [
mayastor
openebs
]))
ansible
fluxcd
restic
openbao
pv-migrate
];
# Scripts
scripts.hello.exec = ''
echo hello from $GREET
'';
# Shell hooks
enterShell = ''
source ${pkgs.bash-completion}/share/bash-completion/bash_completion
echo "Environment ready!"
'';
# Tests
enterTest = ''
echo "Running tests"
git --version | grep --color=auto "${pkgs.git.version}"
'';
}

View File

@@ -1,20 +0,0 @@
# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json
inputs:
nixpkgs:
url: github:NixOS/nixpkgs/nixos-unstable
krew2nix:
url: github:a1994sc/krew2nix
inputs:
nixpkgs:
follows: nixpkgs
# If you're using non-OSS software, you can set allowUnfree to true.
# allowUnfree: true
# If you're willing to use a package that's vulnerable
# permittedInsecurePackages:
# - "openssl-1.1.1w"
# If you have more than one devenv you can merge them
#imports:
# - ./backend

View File

@@ -1,34 +1,17 @@
{
"nodes": {
"devenv": {
"locked": {
"dir": "src/modules",
"lastModified": 1769881431,
"owner": "cachix",
"repo": "devenv",
"rev": "72d5e66e2dd5112766ef4c9565872b51094b542d",
"type": "github"
},
"original": {
"dir": "src/modules",
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1767039857,
"owner": "NixOS",
"repo": "flake-compat",
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
"type": "github"
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"revCount": 69,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.1.0/01948eb7-9cba-704f-bbf3-3fa956735b52/source.tar.gz"
},
"original": {
"owner": "NixOS",
"repo": "flake-compat",
"type": "github"
"type": "tarball",
"url": "https://flakehub.com/f/edolstra/flake-compat/1.1.0.tar.gz"
}
},
"flake-utils": {
@@ -37,6 +20,7 @@
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
@@ -48,47 +32,6 @@
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1769069492,
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "a1ef738813b15cf8ec759bdff5761b027e3e1d23",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1762808025,
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "cb5e3fdca1de58ccbc3ef53de65bd372b48f567c",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"krew2nix": {
"inputs": {
"flake-utils": "flake-utils",
@@ -99,10 +42,11 @@
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1769904483,
"lastModified": 1751765453,
"narHash": "sha256-tgo3BwFM2UUYQz6dVARztbj5AjKfz4exlPxnKLS/ZRg=",
"owner": "a1994sc",
"repo": "krew2nix",
"rev": "17d6ad3375899bd3f7d4d298481536155f3ec13c",
"rev": "11f66e65a0146645388eeab68b6212de0b732ed9",
"type": "github"
},
"original": {
@@ -113,10 +57,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1769461804,
"lastModified": 1751637120,
"narHash": "sha256-xVNy/XopSfIG9c46nRmPaKfH1Gn/56vQ8++xWA8itO4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "bfc1b8a4574108ceef22f02bafcf6611380c100d",
"rev": "5c724ed1388e53cc231ed98330a60eb2f7be4be3",
"type": "github"
},
"original": {
@@ -128,18 +73,15 @@
},
"root": {
"inputs": {
"devenv": "devenv",
"git-hooks": "git-hooks",
"flake-compat": "flake-compat",
"krew2nix": "krew2nix",
"nixpkgs": "nixpkgs",
"pre-commit-hooks": [
"git-hooks"
]
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
@@ -154,6 +96,7 @@
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
@@ -173,10 +116,11 @@
]
},
"locked": {
"lastModified": 1769691507,
"lastModified": 1750931469,
"narHash": "sha256-0IEdQB1nS+uViQw4k3VGUXntjkDp7aAlqcxdewb/hAc=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "28b19c5844cc6e2257801d43f2772a4b4c050a1b",
"rev": "ac8e6f32e11e9c7f153823abc3ab007f2a65d3e1",
"type": "github"
},
"original": {

62
flake.nix Normal file
View File

@@ -0,0 +1,62 @@
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    # Only to ease updating flake.lock; flake-compat itself is consumed by shell.nix.
    # The URL is quoted: bare URL literals are deprecated Nix syntax (RFC 45)
    # and produce warnings/errors on current Nix releases.
    flake-compat.url = "https://flakehub.com/f/edolstra/flake-compat/1.1.0.tar.gz";
    # Allows us to install krew plugins
    krew2nix.url = "github:a1994sc/krew2nix";
    krew2nix.inputs.nixpkgs.follows = "nixpkgs";
  };

  outputs = { self, nixpkgs, krew2nix, ... }: let
    system = "x86_64-linux";
  in {
    # Development shell with the cluster-management toolchain.
    devShells."${system}".default =
      let
        pkgs = import nixpkgs {
          # Overlay adds kubectl.withKrewPlugins used below.
          overlays = [ krew2nix.overlay ];
          inherit system;
        };
        # Python interpreter bundled with hvac (Vault/OpenBao API client);
        # also exported as $PYTHON_BIN for editor integration.
        python = (pkgs.python313.withPackages (python-pkgs: with python-pkgs; [
          hvac
        ]));
      in
      pkgs.mkShell {
        packages = with pkgs; [
          python
          vim gnumake
          talosctl cilium-cli
          kubectx k9s kubernetes-helm
          # kubectl with krew plugins baked in via the krew2nix overlay
          (kubectl.withKrewPlugins (plugins: with plugins; [
            mayastor
            openebs
          ]))
          ansible
          fluxcd
          restic
          openbao
        ];
        shellHook = ''
          # Get completions working
          source ${pkgs.bash-completion}/share/bash-completion/bash_completion
          export TALOSCONFIG=$(pwd)/talos/generated/talosconfig
          export EDITOR=vim
          export RESTIC_REPOSITORY=s3:https://s3.eu-central-003.backblazeb2.com/lumpiasty-backups
          # export AWS_ACCESS_KEY_ID=?
          # export AWS_SECRET_ACCESS_KEY=?
          # export RESTIC_PASSWORD=?
          export VAULT_ADDR=https://openbao.lumpiasty.xyz:8200
          # Add scripts from utils subdir
          export PATH="$PATH:$(pwd)/utils"
          export PYTHON_BIN=${python}/bin/python
        '';
      };
  };
}

View File

@@ -1,12 +0,0 @@
# StorageClass backed by the OpenEBS LVM LocalPV CSI driver.
# Provisions btrfs-formatted logical volumes out of the "openebs-hdd"
# volume group on the node where the volume is scheduled.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: hdd-lvmpv
parameters:
storage: "lvm"
# LVM volume group the driver carves volumes from; must exist on the node.
volgroup: "openebs-hdd"
fsType: "btrfs"
# "shared: yes" lets multiple pods on the same node mount the volume.
shared: "yes"
provisioner: local.csi.openebs.io
allowVolumeExpansion: true
# Immediate binding: the PV is provisioned as soon as the PVC is created,
# without waiting for a consuming pod to be scheduled.
volumeBindingMode: Immediate

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cert-manager
version: v1.19.3
version: v1.19.1
sourceRef:
kind: HelmRepository
name: cert-manager

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cilium
version: 1.18.6
version: 1.18.2
sourceRef:
kind: HelmRepository
name: cilium

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cloudnative-pg
version: 0.27.0
version: 0.26.0
sourceRef:
kind: HelmRepository
name: cnpg

View File

@@ -97,7 +97,7 @@ spec:
env:
- name: GOMEMLIMIT
value: 161MiB
image: registry.k8s.io/coredns/coredns:v1.14.1
image: registry.k8s.io/coredns/coredns:v1.13.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: ingress-nginx
version: 4.14.3
version: 4.13.3
sourceRef:
kind: HelmRepository
name: ingress-nginx

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openbao
version: 0.25.4
version: 0.19.0
sourceRef:
kind: HelmRepository
name: openbao

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openebs
version: 4.4.0
version: 4.3.3
sourceRef:
kind: HelmRepository
name: openebs
@@ -38,7 +38,7 @@ spec:
lvm-localpv:
crds:
lmvLocalPv:
enabled: true
enabled: false
mayastor:
csi:
@@ -124,7 +124,7 @@ spec:
engines:
local:
lvm:
enabled: true
enabled: false
zfs:
enabled: false
replicated:

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: vault-secrets-operator
version: 1.2.0
version: 1.0.1
sourceRef:
kind: HelmRepository
name: hashicorp

View File

@@ -20,7 +20,6 @@ resources:
- configs/loadbalancer-ippool.yaml
- configs/single-hdd-sc.yaml
- configs/single-ssd-sc.yaml
- configs/lvmpv-hdd-sc.yaml
- configs/mayastor-snapshotclass.yaml
- configs/openbao-cert.yaml
- configs/ovh-cert-manager-secret.yaml

15
shell.nix Normal file
View File

@@ -0,0 +1,15 @@
# Needed for Nix Environment Selector
# https://github.com/edolstra/flake-compat/
let
  # Resolve flake-compat to the exact revision pinned in flake.lock so that
  # non-flake entry points evaluate the very same flake as `nix develop`.
  lock = builtins.fromJSON (builtins.readFile ./flake.lock);
  inputName = lock.nodes.root.inputs.flake-compat;
  locked = lock.nodes.${inputName}.locked;
  flake-compat = fetchTarball {
    url = locked.url;
    sha256 = locked.narHash;
  };
in
(import flake-compat { src = ./.; }).shellNix

View File

@@ -1,146 +0,0 @@
#!/usr/bin/env bash
# A utility script to run a kubectl pod with one or more PVCs mounted.
# Original: https://gist.github.com/yuanying/3aa7d59dcce65470804ab43def646ab6
# Modified to add help message, -n and -x options, and other improvements.
# Each PVC given on the command line is mounted at /pvcs/<claimName> in an
# ephemeral pod (removed on exit via --rm).
IMAGE="gcr.io/google-containers/ubuntu-slim:0.14"
COMMAND="/bin/bash"
NAMESPACE=""
CONTEXT=""
# Short random lowercase suffix so concurrent invocations get unique pod names.
SUFFIX=$(date +%s | shasum | base64 | fold -w 10 | head -1 | tr '[:upper:]' '[:lower:]')
# Print the help text and exit non-zero (also used for usage errors).
usage_exit() {
cat <<EOF
kubectl-run-with-pvc - Run a temporary pod with PersistentVolumeClaims mounted
USAGE:
$0 [-c command] [-i image] [-n namespace] [-x context] [-h] PVC [PVC ...]
DESCRIPTION:
Creates an ephemeral Kubernetes pod that mounts one or more PersistentVolumeClaims (PVCs).
Each PVC is mounted at /pvcs/<claimName>. The pod is automatically removed when you exit.
Useful for inspecting, debugging, or manipulating data in PVCs without having to deploy
a persistent pod or job.
OPTIONS:
-i IMAGE
Container image to use in the pod.
Default: gcr.io/google-containers/ubuntu-slim:0.14
-c COMMAND
Command to execute in the container.
Default: /bin/bash
-n NAMESPACE
Kubernetes namespace where the pod will be created.
Default: current namespace (from kubectl config)
-x CONTEXT
kubectl context to use for this operation.
Default: current context (from kubectl config)
-h
Display this help message and exit.
EXAMPLES:
# Mount a single PVC and get an interactive shell
$0 my-pvc
# Mount multiple PVCs
$0 data-pvc logs-pvc config-pvc
# Use a specific namespace
$0 -n my-namespace my-pvc
# Use a different context and namespace
$0 -x prod-cluster -n production my-pvc
# Use Alpine Linux instead of Ubuntu
$0 -i alpine:latest -c sh my-pvc
# Run a command non-interactively
$0 -c "ls -lh /pvcs/my-pvc" my-pvc
MOUNT PATHS:
Each PVC is mounted to: /pvcs/<claimName>
Example: If you mount 'database-pvc', it will be at /pvcs/database-pvc
NOTES:
- Pod name is auto-generated: pvc-mounter-<random-suffix>
- Pod is removed when you exit (--rm flag)
- Uses hostNetwork: true for networking access
- Requires kubectl configured and permissions to create pods
PREREQUISITES:
- kubectl installed and configured
- PVCs must already exist in the target namespace
- User must have permission to create pods in the target namespace
EOF
exit 1
}
while getopts i:c:n:x:h OPT
do
case $OPT in
i) IMAGE=$OPTARG
;;
c) COMMAND=$OPTARG
;;
n) NAMESPACE=$OPTARG
;;
x) CONTEXT=$OPTARG
;;
h) usage_exit
;;
\?) usage_exit
;;
esac
done
shift $(($OPTIND - 1))
# Require at least one PVC
if [ $# -eq 0 ]; then
echo "Error: At least one PVC name is required" 1>&2
usage_exit
fi
# Build the JSON fragments for volumeMounts and volumes from the PVC names.
VOL_MOUNTS=""
VOLS=""
COMMA=""
# "$@" (quoted) prevents glob expansion / word splitting of PVC names;
# the unquoted form could mangle arguments containing shell metacharacters.
for i in "$@"
do
VOL_MOUNTS="${VOL_MOUNTS}${COMMA}{\"name\": \"${i}\",\"mountPath\": \"/pvcs/${i}\"}"
VOLS="${VOLS}${COMMA}{\"name\": \"${i}\",\"persistentVolumeClaim\": {\"claimName\": \"${i}\"}}"
COMMA=","
done
# Assemble the kubectl invocation; KUBECTL_CMD is expanded unquoted on
# purpose so its embedded flags split into separate arguments.
KUBECTL_CMD="kubectl"
[ -n "$CONTEXT" ] && KUBECTL_CMD="$KUBECTL_CMD --context=$CONTEXT"
[ -n "$NAMESPACE" ] && KUBECTL_CMD="$KUBECTL_CMD --namespace=$NAMESPACE"
# COMMAND is deliberately left unquoted after `--` so a multi-word command
# (e.g. -c "ls -lh /pvcs/my-pvc") splits into separate argv entries.
$KUBECTL_CMD run -it --rm --restart=Never --image="${IMAGE}" "pvc-mounter-${SUFFIX}" --overrides "
{
\"spec\": {
\"containers\":[
{
\"args\": [\"${COMMAND}\"],
\"stdin\": true,
\"tty\": true,
\"name\": \"pvc\",
\"image\": \"${IMAGE}\",
\"volumeMounts\": [
${VOL_MOUNTS}
]
}
],
\"volumes\": [
${VOLS}
]
}
}
" -- ${COMMAND}