102 Commits

Author SHA1 Message Date
57eb77917a chore(deps): update renovate/renovate docker tag to v43.104.3 2026-04-05 00:00:47 +00:00
a0814e76ee increase pvc for llama to 300 Gi
All checks were successful
ci/woodpecker/push/flux-reconcile-source Pipeline was successful
2026-04-04 22:49:26 +02:00
da163398a5 add notes about woodpecker to readme
All checks were successful
ci/woodpecker/push/flux-reconcile-source Pipeline was successful
2026-04-04 03:29:15 +02:00
8160a52176 add gemma 4 models
All checks were successful
ci/woodpecker/push/flux-reconcile-source Pipeline was successful
2026-04-04 02:48:02 +02:00
ad3b2229c2 get rid of openrouter proxying via llama-swap
All checks were successful
ci/woodpecker/push/flux-reconcile-source Pipeline was successful
2026-04-04 02:39:26 +02:00
57c2c7ea8d add woodpecker pipeline to reconcile flux
All checks were successful
ci/woodpecker/push/flux-reconcile-source Pipeline was successful
2026-04-04 02:31:08 +02:00
f2d60e0b15 add kubernetes secret engine and approle auth to openbao 2026-04-04 02:06:18 +02:00
9d5dd332fc Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8637' (#196) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-04-04 00:00:57 +00:00
e923fc3c30 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8637 2026-04-04 00:00:54 +00:00
1945f2a9bc remove test woodpeeker pipeline 2026-04-03 23:20:49 +02:00
fdd6755c2f rip out all garm related stuff 2026-04-03 23:20:36 +02:00
3d85148c5a add woodpecker cli 2026-04-03 23:14:46 +02:00
ab5a551124 update devenv 2026-04-03 23:12:10 +02:00
1bb357b3c8 enable web search in opencode 2026-04-03 22:56:58 +02:00
6a0b544bad Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8606' (#193) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start
All checks were successful
ci/woodpecker/push/my-first-workflow Pipeline was successful
2026-04-03 00:00:36 +00:00
4e30c9b94d chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8606 2026-04-03 00:00:32 +00:00
dfafadb4e3 add woodpecker to giitea's allowed host list 2026-04-02 23:01:14 +02:00
ae42e342ca add test workflow
All checks were successful
ci/woodpecker/push/my-first-workflow Pipeline was successful
2026-04-02 22:57:48 +02:00
670312d75b add woodpecker ci 2026-04-02 22:35:28 +02:00
0ce1a797fc Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8589' (#191) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-04-02 00:00:33 +00:00
3d53b4b10b chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8589 2026-04-02 00:00:30 +00:00
98f63b1576 Merge pull request 'chore(deps): update helm release immich to v1.2.2' (#190) from renovate/immich-1.x into fresh-start 2026-04-01 00:00:35 +00:00
edba33b552 chore(deps): update helm release immich to v1.2.2 2026-04-01 00:00:32 +00:00
054df42d8b update qwen3.5 4b ctx size to 128k 2026-03-30 21:05:00 +02:00
08db022d0d Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8576' (#189) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-03-30 00:00:52 +00:00
e485a4fc7f chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8576 2026-03-30 00:00:49 +00:00
9e74ed6a19 increase --fit-target to 1.5GB 2026-03-29 23:50:45 +02:00
42e89c9bb7 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8562' (#188) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-03-29 00:00:53 +00:00
99bc04b76a chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8562 2026-03-29 00:00:50 +00:00
7ee77e33d4 Merge pull request 'chore(deps): update helm release cert-manager to v1.20.1' (#186) from renovate/cert-manager-1.x into fresh-start 2026-03-28 00:05:47 +00:00
8bdd5f2196 chore(deps): update helm release cert-manager to v1.20.1 2026-03-28 00:05:44 +00:00
1d8cb85bd4 Merge pull request 'chore(deps): update renovate/renovate docker tag to v43.95.0' (#163) from renovate/renovate-renovate-43.x into fresh-start
Reviewed-on: #163
2026-03-27 17:43:07 +00:00
eeb302b63b Merge pull request 'chore(deps): update helm release immich to v1.2.1' (#175) from renovate/immich-1.x into fresh-start
Reviewed-on: #175
2026-03-27 17:42:59 +00:00
69b437ed3b Merge pull request 'chore(deps): update helm release k8up to v4.9.0' (#182) from renovate/k8up-4.x into fresh-start
Reviewed-on: #182
2026-03-27 17:42:52 +00:00
54674a6e79 Merge pull request 'chore(deps): update helm release open-webui to v12.13.0' (#183) from renovate/open-webui-12.x into fresh-start
Reviewed-on: #183
2026-03-27 17:42:46 +00:00
a9da405326 chore(deps): update renovate/renovate docker tag to v43.95.0 2026-03-27 17:42:10 +00:00
264871bf68 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8547' (#185) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-03-27 17:42:09 +00:00
6bcd0ba464 chore(deps): update helm release open-webui to v12.13.0 2026-03-27 17:42:07 +00:00
cb53301926 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8547 2026-03-27 17:42:04 +00:00
110817b748 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199' (#184) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start
Reviewed-on: #184
2026-03-27 17:40:38 +00:00
66cb3c9d82 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199 2026-03-27 00:00:28 +00:00
42ae7af649 chore(deps): update helm release k8up to v4.9.0 2026-03-26 00:00:57 +00:00
cffcb1cc2d Merge pull request 'chore(deps): update helm release openbao to v0.26.2' (#181) from renovate/openbao-0.x into fresh-start 2026-03-26 00:00:57 +00:00
a4a7dd6fe6 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8508' (#180) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-26 00:00:54 +00:00
52b8ca79dc chore(deps): update helm release openbao to v0.26.2 2026-03-26 00:00:54 +00:00
9a1fe1f740 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8508 2026-03-26 00:00:49 +00:00
e996a60378 Merge pull request 'chore(deps): update helm release cert-manager-webhook-ovh to v0.9.5' (#179) from renovate/cert-manager-webhook-ovh-0.x into fresh-start 2026-03-25 00:00:35 +00:00
0ccd4d93f1 chore(deps): update helm release immich to v1.2.1 2026-03-25 00:00:34 +00:00
d667c6c0fc Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8496' (#178) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-25 00:00:33 +00:00
4254ebc9ef chore(deps): update helm release cert-manager-webhook-ovh to v0.9.5 2026-03-25 00:00:32 +00:00
8cf02fea0e chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8496 2026-03-25 00:00:29 +00:00
aa3c74d6a7 Merge pull request 'chore(deps): update helm release cilium to v1.19.2' (#177) from renovate/cilium-1.x into fresh-start 2026-03-24 00:00:44 +00:00
289089428e Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8477' (#176) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-24 00:00:41 +00:00
a93f6ec36f chore(deps): update helm release cilium to v1.19.2 2026-03-24 00:00:41 +00:00
1d85bf3a88 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8477 2026-03-24 00:00:39 +00:00
f495debf25 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8468' (#174) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-23 00:00:24 +00:00
bfede17c87 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8468 2026-03-23 00:00:21 +00:00
08ca3f4c4e Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8461' (#173) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-22 00:00:27 +00:00
471c0ba62d chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8461 2026-03-22 00:00:23 +00:00
261141f509 Merge pull request 'chore(deps): update helm release k8up to v4.8.7' (#172) from renovate/k8up-4.x into fresh-start 2026-03-20 22:31:45 +00:00
86d5751842 Merge pull request 'chore(deps): update helm release immich to v1.1.3' (#171) from renovate/immich-1.x into fresh-start 2026-03-20 22:31:42 +00:00
43e531a3ca chore(deps): update helm release k8up to v4.8.7 2026-03-20 22:31:41 +00:00
9a0764268b Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8445' (#170) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-20 22:31:39 +00:00
7c88498756 chore(deps): update helm release immich to v1.1.3 2026-03-20 22:31:38 +00:00
8717526358 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8445 2026-03-20 22:31:36 +00:00
b6a7e5092c Merge pull request 'chore(deps): update helm release ingress-nginx to v4.15.1' (#169) from renovate/ingress-nginx-4.x into fresh-start 2026-03-20 00:00:56 +00:00
27f7a5f29a Merge pull request 'chore(deps): update helm release immich to v1.1.2' (#168) from renovate/immich-1.x into fresh-start 2026-03-20 00:00:52 +00:00
9d0fd0981a chore(deps): update helm release ingress-nginx to v4.15.1 2026-03-20 00:00:52 +00:00
51bc53dbbc chore(deps): update helm release immich to v1.1.2 2026-03-20 00:00:50 +00:00
ce0b13ebb3 change kv cache quant to q8_0 2026-03-20 00:57:39 +01:00
516e157d39 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8400' (#167) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-19 00:00:38 +00:00
73d6d1f15a chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8400 2026-03-19 00:00:34 +00:00
c51fc2a5ef Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8390' (#166) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-18 00:00:31 +00:00
8d994e7aa1 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8390 2026-03-18 00:00:28 +00:00
5b551c6c6e switch pullPolicy to Always on crawl4ai-proxy 2026-03-17 01:47:29 +01:00
7e7b3e3d71 add max ctx on llama.cpp 2026-03-17 01:33:35 +01:00
9f315b38e3 use modded crawl4ai proxy image 2026-03-17 01:24:09 +01:00
3e1a806db1 Merge pull request 'chore(deps): update helm release openbao to v0.26.1' (#165) from renovate/openbao-0.x into fresh-start 2026-03-17 00:01:02 +00:00
f7dba45165 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8369' (#164) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-17 00:01:00 +00:00
c8fac3201a chore(deps): update helm release openbao to v0.26.1 2026-03-17 00:01:00 +00:00
82864a4738 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8369 2026-03-17 00:00:58 +00:00
b54c05b956 add crawl4ai-proxy for openwebui 2026-03-16 20:25:30 +01:00
afdada25a0 add crawl4ai deployment 2026-03-16 19:42:01 +01:00
79315d32db add GLM-4.7-Flash model 2026-03-16 18:19:28 +01:00
a2a5cd72a9 configure open webui to use sso from authentik 2026-03-16 17:30:16 +01:00
c2706a8af2 Merge pull request 'chore(deps): update renovate/renovate docker tag to v43.76.1' (#157) from renovate/renovate-renovate-43.x into fresh-start
Reviewed-on: #157
2026-03-15 17:40:55 +00:00
610ca0017e Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8352' (#162) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-15 17:40:29 +00:00
466932347a chore(deps): update renovate/renovate docker tag to v43.76.1 2026-03-15 17:40:29 +00:00
afbcea4e82 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8352 2026-03-15 17:40:26 +00:00
20ad26ed31 Merge pull request 'chore(deps): update alpine docker tag to v3.23' (#158) from renovate/alpine-3.x into fresh-start
Reviewed-on: #158
2026-03-15 17:38:29 +00:00
7a2d1e0437 Merge pull request 'chore(deps): update helm release openbao to v0.26.0' (#159) from renovate/openbao-0.x into fresh-start
Reviewed-on: #159
2026-03-15 17:38:19 +00:00
6b5929fb95 Merge pull request 'chore(deps): update golang docker tag to v1.26' (#160) from renovate/golang-1.x into fresh-start
Reviewed-on: #160
2026-03-15 17:37:51 +00:00
6b64f1a8b8 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198' (#161) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start
Reviewed-on: #161
2026-03-15 17:37:40 +00:00
4b4cec10be chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198 2026-03-15 00:00:34 +00:00
1f319d607a chore(deps): update golang docker tag to v1.26 2026-03-15 00:00:32 +00:00
7d90001f18 chore(deps): update alpine docker tag to v3.23 2026-03-15 00:00:30 +00:00
7948f53d1d add authentik vault policies 2026-03-14 20:12:01 +01:00
829a5a3fd8 add authentik deployment 2026-03-14 20:08:48 +01:00
cf28dcb5eb add missing allowed renovate command 2026-03-14 19:58:35 +01:00
4f1764d192 fix shell completion in garm-cli 2026-03-14 19:27:45 +01:00
49f88e4f96 remove non-functional garm image update workflow 2026-03-14 19:27:35 +01:00
493f939551 chore(deps): update helm release openbao to v0.26.0 2026-03-14 00:00:29 +00:00
80 changed files with 945 additions and 996 deletions

View File

@@ -1,66 +0,0 @@
name: Build garm image
on:
schedule:
- cron: "13 3 * * *"
push:
branches:
- main
paths:
- .gitea/workflows/garm-image.yml
- apps/garm/image-source.env
- docker/garm/**
workflow_dispatch:
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Load pin data
shell: bash
run: |
set -euo pipefail
source apps/garm/image-source.env
echo "GARM_COMMIT=${GARM_COMMIT}" >> "$GITHUB_ENV"
echo "GARM_COMMIT_NUMBER=${GARM_COMMIT_NUMBER}" >> "$GITHUB_ENV"
echo "GARM_IMAGE=${GARM_IMAGE}" >> "$GITHUB_ENV"
- name: Verify commit number
shell: bash
run: |
set -euo pipefail
tmpdir="$(mktemp -d)"
trap 'rm -rf "$tmpdir"' EXIT
git clone --filter=blob:none https://github.com/cloudbase/garm.git "$tmpdir"
expected="$(git -C "$tmpdir" rev-list --count "$GARM_COMMIT")"
if [ "$expected" != "$GARM_COMMIT_NUMBER" ]; then
echo "Pin mismatch: expected r${expected}, got r${GARM_COMMIT_NUMBER}" >&2
exit 1
fi
- name: Set up Buildx
uses: docker/setup-buildx-action@v3
- name: Login to gitea registry
uses: docker/login-action@v3
with:
registry: gitea.lumpiasty.xyz
username: ${{ secrets.REGISTRY_USERNAME }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Build and push
uses: docker/build-push-action@v6
with:
context: .
file: docker/garm/Dockerfile
push: true
build-args: |
GARM_COMMIT=${{ env.GARM_COMMIT }}
tags: |
${{ env.GARM_IMAGE }}
labels: |
org.opencontainers.image.source=https://github.com/cloudbase/garm
org.opencontainers.image.revision=${{ env.GARM_COMMIT }}

View File

@@ -0,0 +1,49 @@
when:
- event: push
branch: fresh-start
skip_clone: true
steps:
- name: Get kubernetes access from OpenBao
image: quay.io/openbao/openbao:2.5.2
environment:
VAULT_ADDR: https://openbao.lumpiasty.xyz:8200
ROLE_ID:
from_secret: flux_reconcile_role_id
SECRET_ID:
from_secret: flux_reconcile_secret_id
commands:
- bao write -field token auth/approle/login
role_id=$ROLE_ID
secret_id=$SECRET_ID > /woodpecker/.vault_id
- export VAULT_TOKEN=$(cat /woodpecker/.vault_id)
- bao write -format json -f /kubernetes/creds/flux-reconcile > /woodpecker/kube_credentials
- name: Construct Kubeconfig
image: alpine/k8s:1.32.13
environment:
KUBECONFIG: /woodpecker/kubeconfig
commands:
- kubectl config set-cluster cluster
--server=https://$KUBERNETES_SERVICE_HOST
--certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- kubectl config set-credentials cluster
--token=$(jq -r .data.service_account_token /woodpecker/kube_credentials)
- kubectl config set-context cluster
--cluster cluster
--user cluster
--namespace flux-system
- kubectl config use-context cluster
- name: Reconcile git source
image: ghcr.io/fluxcd/flux-cli:v2.8.3
environment:
KUBECONFIG: /woodpecker/kubeconfig
commands:
- flux reconcile source git flux-system
- name: Invalidate OpenBao token
image: quay.io/openbao/openbao:2.5.2
environment:
VAULT_ADDR: https://openbao.lumpiasty.xyz:8200
commands:
- export VAULT_TOKEN=$(cat /woodpecker/.vault_id)
- bao write -f auth/token/revoke-self

View File

@@ -1,6 +1,6 @@
SHELL := /usr/bin/env bash
.PHONY: install-router gen-talos-config apply-talos-config get-kubeconfig garm-image-build garm-image-push garm-image-build-push
.PHONY: install-router gen-talos-config apply-talos-config get-kubeconfig
install-router:
ansible-playbook ansible/playbook.yml -i ansible/hosts
@@ -27,19 +27,3 @@ apply-talos-config:
get-kubeconfig:
talosctl -n anapistula-delrosalae kubeconfig talos/generated/kubeconfig
garm-image-build:
set -euo pipefail; \
source apps/garm/image-source.env; \
docker build \
-f docker/garm/Dockerfile \
--build-arg GARM_COMMIT=$$GARM_COMMIT \
-t $$GARM_IMAGE \
.
garm-image-push:
set -euo pipefail; \
source apps/garm/image-source.env; \
docker push $$GARM_IMAGE
garm-image-build-push: garm-image-build garm-image-push

View File

@@ -141,7 +141,7 @@ Currently the k8s cluster consists of single node (hostname anapistula-delrosala
## Software stack
The cluster itself is based on [Talos Linux](https://www.talos.dev/) (which is also a Kubernetes distribution) and uses [Cilium](https://cilium.io/) as CNI, IPAM, kube-proxy replacement, Load Balancer, and BGP control plane. Persistent volumes are managed by [OpenEBS LVM LocalPV](https://openebs.io/docs/user-guides/local-storage-user-guide/local-pv-lvm/lvm-overview). Applications are deployed using GitOps (this repo) and reconciled on cluster using [Flux](https://fluxcd.io/). Git repository is hosted on [Gitea](https://gitea.io/) running on a cluster itself. Secets are kept in [OpenBao](https://openbao.org/) (HashiCorp Vault fork) running on a cluster and synced to cluster objects using [Vault Secrets Operator](https://github.com/hashicorp/vault-secrets-operator). Deployments are kept up to date using self hosted [Renovate](https://www.mend.io/renovate/) bot updating manifests in the Git repository. Incoming HTTP traffic is routed to cluster using [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) and certificates are issued by [cert-manager](https://cert-manager.io/) with [Let's Encrypt](https://letsencrypt.org/) ACME issuer with [cert-manager-webhook-ovh](https://github.com/aureq/cert-manager-webhook-ovh) resolving DNS-01 challanges. Cluster also runs [CloudNativePG](https://cloudnative-pg.io/) operator for managing PostgreSQL databases. Router is running [Mikrotik RouterOS](https://help.mikrotik.com/docs/spaces/ROS/pages/328059/RouterOS) and its configuration is managed via [Ansible](https://docs.ansible.com/) playbook in this repo. High level core cluster software architecture is shown on the diagram below.
The cluster itself is based on [Talos Linux](https://www.talos.dev/) (which is also a Kubernetes distribution) and uses [Cilium](https://cilium.io/) as CNI, IPAM, kube-proxy replacement, Load Balancer, and BGP control plane. Persistent volumes are managed by [OpenEBS LVM LocalPV](https://openebs.io/docs/user-guides/local-storage-user-guide/local-pv-lvm/lvm-overview). Applications are deployed using GitOps (this repo) and reconciled on cluster using [Flux](https://fluxcd.io/). Git repository is hosted on [Gitea](https://gitea.io/) running on a cluster itself. Secets are kept in [OpenBao](https://openbao.org/) (HashiCorp Vault fork) running on a cluster and synced to cluster objects using [Vault Secrets Operator](https://github.com/hashicorp/vault-secrets-operator). Deployments are kept up to date using self hosted [Renovate](https://www.mend.io/renovate/) bot updating manifests in the Git repository. There is a [Woodpecker](https://woodpecker-ci.org/) instance watching repositories on Gitea and scheduling jobs on cluster. Incoming HTTP traffic is routed to cluster using [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) and certificates are issued by [cert-manager](https://cert-manager.io/) with [Let's Encrypt](https://letsencrypt.org/) ACME issuer with [cert-manager-webhook-ovh](https://github.com/aureq/cert-manager-webhook-ovh) resolving DNS-01 challanges. Cluster also runs [CloudNativePG](https://cloudnative-pg.io/) operator for managing PostgreSQL databases. Router is running [Mikrotik RouterOS](https://help.mikrotik.com/docs/spaces/ROS/pages/328059/RouterOS) and its configuration is managed via [Ansible](https://docs.ansible.com/) playbook in this repo. High level core cluster software architecture is shown on the diagram below.
> Talos Linux is an immutable Linux distribution purpose-built for running Kubernetes. The OS is distributed as an OCI (Docker) image and does not contain any package manager, shell, SSH, or any other tools for managing the system. Instead, all operations are performed using API, which can be accessed using `talosctl` CLI tool.
@@ -177,14 +177,23 @@ flowchart TD
vault_operator -- "Retrieves secrets" --> vault[OpenBao] -- "Secret storage" --> lv
vault -- "Auth method" --> kubeapi
gitea -- "Receives events" --> woodpecker[Woodpecker CI] -- "Schedules jobs" --> kubeapi
gitea -- "Stores repositories" --> lv
gitea --> renovate[Renovate Bot] -- "Updates manifests" --> gitea
gitea--> renovate[Renovate Bot] -- "Updates manifests" --> gitea
end
```
### Reconcilation paths of each component
- Kubernetes manifests are reconciled using Flux triggerred by Woodpecker CI on push
- RouterOS configs are applied by Ansible <!-- ran by Gitea Action on push -->
- Talos configs are applied using makefile <!-- switch to ansible and trigger on action push -->
- Vault policies are applied by running `synchronize-vault.py` <!-- triggerred by Gitea action on push -->
<!-- - Docker images are built and pushed to registry by Gitea Actions on push -->
<!-- TODO: Backups, monitoring, logging, deployment with ansible etc -->
## Software
@@ -228,6 +237,7 @@ flowchart TD
|------|------|-------------|
| <img src="docs/assets/devenv.svg" alt="devenv" height="50" width="50"> | devenv | Tool for declarative managment of development environment using Nix |
| <img src="docs/assets/renovate.svg" alt="Renovate" height="50" width="50"> | Renovate | Bot for keeping dependencies up to date |
| <img src="docs/assets/woodpecker.svg" alt="Woodpecker" height="50" width="50"> | Woodpecker CI | Continous Integration system |
### AI infrastructure

View File

@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- secret.yaml
- release.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: authentik

View File

@@ -0,0 +1,23 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: authentik-postgresql-cluster-lvmhdd
namespace: authentik
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:17.4
bootstrap:
initdb:
database: authentik
owner: authentik
storage:
pvcTemplate:
storageClassName: hdd-lvmpv
resources:
requests:
storage: 10Gi
volumeName: authentik-postgresql-cluster-lvmhdd-1

View File

@@ -1,13 +1,12 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: garm-lvmhdd
name: authentik-postgresql-cluster-lvmhdd-1
namespace: openebs
spec:
capacity: 5Gi
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
@@ -17,10 +16,10 @@ spec:
kind: PersistentVolume
apiVersion: v1
metadata:
name: garm-lvmhdd
name: authentik-postgresql-cluster-lvmhdd-1
spec:
capacity:
storage: 5Gi
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
@@ -29,18 +28,6 @@ spec:
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: garm-lvmhdd
volumeHandle: authentik-postgresql-cluster-lvmhdd-1
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: garm-lvmhdd
namespace: garm
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: hdd-lvmpv
volumeName: garm-lvmhdd
# PVCs are dynamically created by the Postgres operator

View File

@@ -0,0 +1,61 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: authentik
namespace: authentik
spec:
interval: 24h
url: https://charts.goauthentik.io
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: authentik
namespace: authentik
spec:
interval: 30m
chart:
spec:
chart: authentik
version: 2026.2.1
sourceRef:
kind: HelmRepository
name: authentik
namespace: authentik
interval: 12h
values:
authentik:
postgresql:
host: authentik-postgresql-cluster-lvmhdd-rw
name: authentik
user: authentik
global:
env:
- name: AUTHENTIK_SECRET_KEY
valueFrom:
secretKeyRef:
name: authentik-secret
key: secret_key
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
name: authentik-postgresql-cluster-lvmhdd-app
key: password
postgresql:
enabled: false
server:
ingress:
enabled: true
ingressClassName: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
hosts:
- authentik.lumpiasty.xyz
tls:
- secretName: authentik-ingress
hosts:
- authentik.lumpiasty.xyz

View File

@@ -0,0 +1,38 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: authentik-secret
namespace: authentik
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: authentik
namespace: authentik
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: authentik
serviceAccount: authentik-secret
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: authentik-secret
namespace: authentik
spec:
type: kv-v2
mount: secret
path: authentik
destination:
create: true
name: authentik-secret
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: authentik

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: crawl4ai-proxy
namespace: crawl4ai
spec:
replicas: 1
selector:
matchLabels:
app: crawl4ai-proxy
template:
metadata:
labels:
app: crawl4ai-proxy
spec:
containers:
- name: crawl4ai-proxy
image: gitea.lumpiasty.xyz/lumpiasty/crawl4ai-proxy-fit:latest
imagePullPolicy: Always
env:
- name: LISTEN_PORT
value: "8000"
- name: CRAWL4AI_ENDPOINT
value: http://crawl4ai.crawl4ai.svc.cluster.local:11235/crawl
ports:
- name: http
containerPort: 8000
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: 3
periodSeconds: 10
timeoutSeconds: 2
failureThreshold: 6
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 10
periodSeconds: 15
timeoutSeconds: 2
failureThreshold: 6
resources:
requests:
cpu: 25m
memory: 32Mi
limits:
cpu: 200m
memory: 128Mi

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: crawl4ai-proxy
namespace: crawl4ai
spec:
type: ClusterIP
selector:
app: crawl4ai-proxy
ports:
- name: http
port: 8000
targetPort: 8000
protocol: TCP

View File

@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: crawl4ai
namespace: crawl4ai
spec:
replicas: 1
selector:
matchLabels:
app: crawl4ai
template:
metadata:
labels:
app: crawl4ai
spec:
containers:
- name: crawl4ai
image: unclecode/crawl4ai:latest
imagePullPolicy: IfNotPresent
env:
- name: CRAWL4AI_API_TOKEN
valueFrom:
secretKeyRef:
name: crawl4ai-secret
key: api_token
optional: false
- name: MAX_CONCURRENT_TASKS
value: "5"
ports:
- name: http
containerPort: 11235
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: "2"
memory: 4Gi
volumeMounts:
- name: dshm
mountPath: /dev/shm
volumes:
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 1Gi

View File

@@ -2,10 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- configmap.yaml
- service.yaml
- ingress.yaml
- rbac.yaml
- secret.yaml
- deployment.yaml
- service.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: crawl4ai

View File

@@ -1,32 +1,38 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: crawl4ai-secret
namespace: crawl4ai
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: garm
namespace: garm
name: crawl4ai
namespace: crawl4ai
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: garm
serviceAccount: garm
role: crawl4ai
serviceAccount: crawl4ai-secret
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: garm-config
namespace: garm
name: crawl4ai-secret
namespace: crawl4ai
spec:
type: kv-v2
mount: secret
path: garm
path: crawl4ai
destination:
create: true
name: garm-config
name: crawl4ai-secret
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: garm
vaultAuthRef: crawl4ai

View File

@@ -1,14 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: garm
namespace: garm
name: crawl4ai
namespace: crawl4ai
spec:
type: ClusterIP
selector:
app: garm
app: crawl4ai
ports:
- name: http
port: 9997
targetPort: 9997
port: 11235
targetPort: 11235
protocol: TCP

View File

@@ -1,49 +0,0 @@
# garm
This app deploys `garm` with external `garm-provider-k8s`.
- API/UI ingress: `https://garm.lumpiasty.xyz`
- Internal service DNS: `http://garm.garm.svc.cluster.local:9997`
## Vault secret requirements
`VaultStaticSecret` reads `secret/data/garm` and expects at least:
- `jwt_auth_secret`
- `database_passphrase` (must be 32 characters)
## Connect garm to Gitea
After Flux reconciles this app, initialize garm and add Gitea endpoint/credentials.
```bash
# 1) Initialize garm (from your local devenv shell)
garm-cli init \
--name homelab \
--url https://garm.lumpiasty.xyz \
--username admin \
--email admin@lumpiasty.xyz \
--password '<STRONG_ADMIN_PASSWORD>' \
--metadata-url http://garm.garm.svc.cluster.local:9997/api/v1/metadata \
--callback-url http://garm.garm.svc.cluster.local:9997/api/v1/callbacks \
--webhook-url http://garm.garm.svc.cluster.local:9997/webhooks
# 2) Add Gitea endpoint
garm-cli gitea endpoint create \
--name local-gitea \
--description 'Cluster Gitea' \
--base-url http://gitea-http.gitea.svc.cluster.local:80 \
--api-base-url http://gitea-http.gitea.svc.cluster.local:80/api/v1
# 3) Add Gitea PAT credentials
garm-cli gitea credentials add \
--name gitea-pat \
--description 'PAT for garm' \
--endpoint local-gitea \
--auth-type pat \
--pat-oauth-token '<GITEA_PAT_WITH_write:repository,write:organization>'
```
Then add repositories/orgs and create pools against provider `kubernetes_external`.
If Gitea refuses webhook installation to cluster-local URLs, set `gitea.config.webhook.ALLOWED_HOST_LIST` in `apps/gitea/release.yaml`.

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: garm-provider-k8s-config
namespace: garm
data:
provider-config.yaml: |
kubeConfigPath: ""
runnerNamespace: "garm-runners"
podTemplate:
spec:
restartPolicy: Never
flavors:
default:
requests:
cpu: 100m
memory: 512Mi
limits:
memory: 2Gi

View File

@@ -1,106 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: garm
namespace: garm
spec:
replicas: 1
selector:
matchLabels:
app: garm
template:
metadata:
labels:
app: garm
spec:
serviceAccountName: garm
initContainers:
- name: render-garm-config
image: alpine:3.21
env:
- name: JWT_AUTH_SECRET
valueFrom:
secretKeyRef:
name: garm-config
key: jwt_auth_secret
- name: DATABASE_PASSPHRASE
valueFrom:
secretKeyRef:
name: garm-config
key: database_passphrase
command:
- /bin/sh
- -ec
- |
cat <<EOF > /etc/garm/config.toml
[default]
enable_webhook_management = true
[logging]
enable_log_streamer = true
log_format = "text"
log_level = "info"
log_source = false
[metrics]
enable = true
disable_auth = false
[jwt_auth]
secret = "${JWT_AUTH_SECRET}"
time_to_live = "8760h"
[apiserver]
bind = "0.0.0.0"
port = 9997
use_tls = false
[apiserver.webui]
enable = true
[database]
backend = "sqlite3"
passphrase = "${DATABASE_PASSPHRASE}"
[database.sqlite3]
db_file = "/data/garm.db"
busy_timeout_seconds = 5
[[provider]]
name = "kubernetes_external"
description = "Kubernetes provider"
provider_type = "external"
[provider.external]
config_file = "/etc/garm/provider-config.yaml"
provider_executable = "/opt/garm/providers.d/garm-provider-k8s"
environment_variables = ["KUBERNETES_"]
EOF
volumeMounts:
- name: config-dir
mountPath: /etc/garm
containers:
- name: garm
image: gitea.lumpiasty.xyz/lumpiasty/garm-k8s:r1380
imagePullPolicy: IfNotPresent
command:
- /bin/garm
- --config
- /etc/garm/config.toml
ports:
- name: http
containerPort: 9997
volumeMounts:
- name: data
mountPath: /data
- name: config-dir
mountPath: /etc/garm
- name: provider-config
mountPath: /etc/garm/provider-config.yaml
subPath: provider-config.yaml
volumes:
- name: data
persistentVolumeClaim:
claimName: garm-lvmhdd
- name: config-dir
emptyDir: {}
- name: provider-config
configMap:
name: garm-provider-k8s-config

View File

@@ -1,5 +0,0 @@
# renovate: datasource=github-refs depName=cloudbase/garm versioning=git
GARM_COMMIT=818a9dddccba5f2843f185e6a846770988f31fc5
GARM_COMMIT_NUMBER=1380
GARM_IMAGE_REPO=gitea.lumpiasty.xyz/lumpiasty/garm-k8s
GARM_IMAGE=gitea.lumpiasty.xyz/lumpiasty/garm-k8s:r1380

View File

@@ -1,24 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: garm
name: garm
annotations:
cert-manager.io/cluster-issuer: letsencrypt
spec:
ingressClassName: nginx-ingress
rules:
- host: garm.lumpiasty.xyz
http:
paths:
- backend:
service:
name: garm
port:
number: 9997
path: /
pathType: Prefix
tls:
- hosts:
- garm.lumpiasty.xyz
secretName: garm-ingress

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: garm
---
apiVersion: v1
kind: Namespace
metadata:
name: garm-runners

View File

@@ -1,51 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: garm
namespace: garm
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: garm-provider-k8s
namespace: garm-runners
rules:
- apiGroups: [""]
resources: ["pods", "pods/log", "configmaps", "secrets", "events"]
verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: garm-provider-k8s
namespace: garm-runners
subjects:
- kind: ServiceAccount
name: garm
namespace: garm
roleRef:
kind: Role
name: garm-provider-k8s
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: garm-namespace-manager
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: garm-namespace-manager
subjects:
- kind: ServiceAccount
name: garm
namespace: garm
roleRef:
kind: ClusterRole
name: garm-namespace-manager
apiGroup: rbac.authorization.k8s.io

View File

@@ -73,7 +73,7 @@ spec:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true
webhook:
ALLOWED_HOST_LIST: garm.garm.svc.cluster.local
ALLOWED_HOST_LIST: woodpecker.lumpiasty.xyz
admin:
username: GiteaAdmin
email: gi@tea.com

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: immich
version: 1.1.1
version: 1.2.2
sourceRef:
kind: HelmRepository
name: secustor

View File

@@ -1,6 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crawl4ai
- crawl4ai-proxy
- authentik
- gitea
- renovate
- librechat
@@ -11,4 +14,4 @@ resources:
- searxng
- ispeak3
- openwebui
- garm
- woodpecker

View File

@@ -4,22 +4,19 @@ logToStdout: "both" # proxy and upstream
macros:
base_args: "--no-warmup --port ${PORT}"
common_args: "--fit-target 1536 --fit-ctx 65536 --no-warmup --port ${PORT}"
common_args: "--fit-target 1536 --no-warmup --port ${PORT}"
ctx_128k: "--ctx-size 131072"
ctx_256k: "--ctx-size 262144"
gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q4_0 -ctv q4_0"
qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q4_0 -ctv q4_0"
qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0"
qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q8_0 -ctv q8_0"
qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf"
qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf"
glm47_flash_args: "--temp 0.7 --top-p 1.0 --min-p 0.01 --repeat-penalty 1.0"
gemma4_sampling: "--temp 1.0 --top-p 0.95 --top-k 64"
thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'"
thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'"
peers:
openrouter:
proxy: https://openrouter.ai/api
apiKey: ${env.OPENROUTER_API_KEY}
models:
- z-ai/glm-5
hooks:
on_startup:
preload:
@@ -38,6 +35,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
${common_args}
@@ -45,6 +43,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
--no-mmproj
${common_args}
@@ -53,6 +52,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
${common_args}
@@ -60,6 +60,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
--no-mmproj
${common_args}
@@ -75,13 +76,14 @@ models:
--top-p 0.95
--top-k 40
--repeat-penalty 1.0
-ctk q4_0 -ctv q4_0
-ctk q8_0 -ctv q8_0
${common_args}
"Qwen3.5-35B-A3B-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_35b_args}
${common_args}
@@ -89,6 +91,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_35b_args}
${common_args}
${thinking_off}
@@ -100,6 +103,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj}
${ctx_256k}
${qwen35_35b_args}
${common_args}
@@ -108,6 +112,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj}
${ctx_256k}
${qwen35_35b_args}
${common_args}
${thinking_off}
@@ -116,6 +121,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
${ctx_256k}
${qwen35_sampling}
${base_args}
${thinking_on}
@@ -133,6 +139,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -141,6 +148,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -149,6 +157,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -157,6 +166,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -166,6 +176,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj}
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -175,6 +186,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj}
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -183,6 +195,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -191,6 +204,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -199,6 +213,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -207,6 +222,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -215,6 +231,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -223,6 +240,46 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
"GLM-4.7-Flash-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/GLM-4.7-Flash-GGUF:Q4_K_M
${glm47_flash_args}
${common_args}
"gemma-4-26B-A4B-it:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q4_K_XL \
${ctx_256k}
${gemma4_sampling}
${common_args}
"gemma-4-26B-A4B-it:UD-Q2_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q2_K_XL \
${ctx_256k}
${gemma4_sampling}
${common_args}
"unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL \
${ctx_128k}
${gemma4_sampling}
${common_args}
"unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL \
${ctx_128k}
${gemma4_sampling}
${common_args}

View File

@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v197-vulkan-b8248
image: ghcr.io/mostlygeek/llama-swap:v199-vulkan-b8637
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap
@@ -29,12 +29,6 @@ spec:
- containerPort: 8080
name: http
protocol: TCP
env:
- name: OPENROUTER_API_KEY
valueFrom:
secretKeyRef:
name: llama-openrouter
key: OPENROUTER_API_KEY
volumeMounts:
- name: models
mountPath: /root/.cache

View File

@@ -7,7 +7,7 @@ metadata:
name: llama-models-lvmssd
namespace: openebs
spec:
capacity: 200Gi
capacity: "322122547200"
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
@@ -20,7 +20,7 @@ metadata:
name: llama-models-lvmssd
spec:
capacity:
storage: 200Gi
storage: 300Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
@@ -41,6 +41,6 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
storage: 300Gi
storageClassName: ssd-lvmpv
volumeName: llama-models-lvmssd

View File

@@ -36,26 +36,3 @@ spec:
excludeRaw: true
vaultAuthRef: llama
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: llama-openrouter
namespace: llama
spec:
type: kv-v2
mount: secret
path: openrouter
destination:
create: true
name: llama-openrouter
type: Opaque
transformation:
excludeRaw: true
templates:
OPENROUTER_API_KEY:
text: '{{ get .Secrets "API_KEY" }}'
vaultAuthRef: llama

View File

@@ -4,5 +4,6 @@ resources:
- namespace.yaml
- pvc.yaml
- pvc-pipelines.yaml
- secret.yaml
- release.yaml
- ingress.yaml

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: open-webui
version: 12.10.0
version: 12.13.0
sourceRef:
kind: HelmRepository
name: open-webui
@@ -44,3 +44,30 @@ spec:
persistence:
enabled: true
existingClaim: openwebui-pipelines-lvmhdd
# SSO with Authentik
extraEnvVars:
- name: WEBUI_URL
value: "https://openwebui.lumpiasty.xyz"
- name: OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
name: openwebui-authentik
key: client_id
- name: OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: openwebui-authentik
key: client_secret
- name: OAUTH_PROVIDER_NAME
value: "authentik"
- name: OPENID_PROVIDER_URL
value: "https://authentik.lumpiasty.xyz/application/o/open-web-ui/.well-known/openid-configuration"
- name: OPENID_REDIRECT_URI
value: "https://openwebui.lumpiasty.xyz/oauth/oidc/callback"
- name: ENABLE_OAUTH_SIGNUP
value: "true"
- name: ENABLE_LOGIN_FORM
value: "false"
- name: OAUTH_MERGE_ACCOUNTS_BY_EMAIL
value: "true"

View File

@@ -0,0 +1,43 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: openwebui-secret
namespace: openwebui
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: openwebui
namespace: openwebui
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: openwebui
serviceAccount: openwebui-secret
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: openwebui-authentik
namespace: openwebui
spec:
type: kv-v2
mount: secret
path: authentik/openwebui
destination:
create: true
name: openwebui-authentik
type: Opaque
transformation:
excludeRaw: true
templates:
client_id:
text: '{{ get .Secrets "client_id" }}'
client_secret:
text: '{{ get .Secrets "client_secret" }}'
vaultAuthRef: openwebui

View File

@@ -9,4 +9,3 @@ data:
RENOVATE_ENDPOINT: https://gitea.lumpiasty.xyz/api/v1
RENOVATE_PLATFORM: gitea
RENOVATE_GIT_AUTHOR: Renovate Bot <renovate@lumpiasty.xyz>
RENOVATE_ALLOWED_COMMANDS: '["^node utils/update-garm-cli-hash\\.mjs$"]'

View File

@@ -15,7 +15,7 @@ spec:
- name: renovate
# Update this to the latest available and then enable Renovate on
# the manifest
image: renovate/renovate:43.64.6-full
image: renovate/renovate:43.104.3-full
envFrom:
- secretRef:
name: renovate-gitea-token

View File

@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- release.yaml
- secret.yaml

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: woodpecker

View File

@@ -0,0 +1,23 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: woodpecker-postgresql-cluster
namespace: woodpecker
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:17.4
bootstrap:
initdb:
database: woodpecker
owner: woodpecker
storage:
pvcTemplate:
storageClassName: ssd-lvmpv
resources:
requests:
storage: 10Gi
volumeName: woodpecker-postgresql-cluster-lvmssd

View File

@@ -0,0 +1,33 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: woodpecker-postgresql-cluster-lvmssd
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-ssd$
volGroup: openebs-ssd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: woodpecker-postgresql-cluster-lvmssd
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: ssd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: woodpecker-postgresql-cluster-lvmssd
---
# PVC is dynamically created by the Postgres operator

View File

@@ -0,0 +1,115 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: woodpecker
namespace: woodpecker
spec:
interval: 24h
url: https://woodpecker-ci.org/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: woodpecker
namespace: woodpecker
spec:
interval: 30m
chart:
spec:
chart: woodpecker
version: 3.5.1
sourceRef:
kind: HelmRepository
name: woodpecker
namespace: woodpecker
interval: 12h
values:
server:
enabled: true
statefulSet:
replicaCount: 1
persistentVolume:
enabled: false # Using Postgresql database
env:
WOODPECKER_HOST: "https://woodpecker.lumpiasty.xyz"
# Gitea integration
WOODPECKER_GITEA: "true"
WOODPECKER_GITEA_URL: "https://gitea.lumpiasty.xyz"
# PostgreSQL database configuration
WOODPECKER_DATABASE_DRIVER: postgres
# Password is loaded from woodpecker-postgresql-cluster-app secret (created by CNPG)
WOODPECKER_DATABASE_DATASOURCE:
valueFrom:
secretKeyRef:
name: woodpecker-postgresql-cluster-app
key: fqdn-uri
# Allow logging in from all accounts on Gitea
WOODPECKER_OPEN: "true"
# Make lumpiasty admin
WOODPECKER_ADMIN: GiteaAdmin
createAgentSecret: true
extraSecretNamesForEnvFrom:
- woodpecker-secrets
ingress:
enabled: true
ingressClassName: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true"
hosts:
- host: woodpecker.lumpiasty.xyz
paths:
- path: /
backend:
serviceName: woodpecker-server
servicePort: 80
tls:
- hosts:
- woodpecker.lumpiasty.xyz
secretName: woodpecker-ingress
resources:
requests:
cpu: 100m
memory: 256Mi
service:
type: ClusterIP
port: 80
agent:
enabled: true
replicaCount: 2
env:
WOODPECKER_SERVER: "woodpecker-server:9000"
WOODPECKER_BACKEND: kubernetes
WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: ssd-lvmpv
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
WOODPECKER_CONNECT_RETRY_COUNT: "5"
mapAgentSecret: true
extraSecretNamesForEnvFrom:
- woodpecker-secrets
persistence:
enabled: false
serviceAccount:
create: true
rbac:
create: true
resources:
requests:
cpu: 100m
memory: 128Mi

View File

@@ -0,0 +1,62 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: woodpecker-secret
namespace: woodpecker
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: woodpecker
namespace: woodpecker
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: woodpecker
serviceAccount: woodpecker-secret
---
# Main woodpecker secrets from Vault
# Requires vault kv put secret/woodpecker \
# WOODPECKER_AGENT_SECRET="$(openssl rand -hex 32)" \
# WOODPECKER_GITEA_CLIENT="<gitea-oauth-client>" \
# WOODPECKER_GITEA_SECRET="<gitea-oauth-secret>"
# Note: Database password comes from CNPG secret (woodpecker-postgresql-cluster-app)
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: woodpecker-secrets
namespace: woodpecker
spec:
type: kv-v2
mount: secret
path: woodpecker
destination:
create: true
name: woodpecker-secrets
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: woodpecker
---
# Container registry credentials for Kaniko
# Requires vault kv put secret/container-registry \
# REGISTRY_USERNAME="<username>" \
# REGISTRY_PASSWORD="<token>"
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: container-registry
namespace: woodpecker
spec:
type: kv-v2
mount: secret
path: container-registry
destination:
create: true
name: container-registry
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: woodpecker

View File

@@ -3,11 +3,11 @@
"devenv": {
"locked": {
"dir": "src/modules",
"lastModified": 1773504385,
"narHash": "sha256-ANaeR+xVHxjGz36VI4qlZUbdhrlSE0xU7O7AUJKw3zU=",
"lastModified": 1775201809,
"narHash": "sha256-WmpoCegCQ6Q2ZyxqO05zlz/7XXjt/l2iut4Nk5Nt+W4=",
"owner": "cachix",
"repo": "devenv",
"rev": "4bce49e6f60c69e99eeb643efbbf74125cefd329",
"rev": "42a5505d4700e791732e48a38b4cca05a755f94b",
"type": "github"
},
"original": {
@@ -45,11 +45,11 @@
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1773451905,
"narHash": "sha256-S/bukFEwbOYQbnR5UpciwYA42aEt1w5LK73GwARhsaA=",
"lastModified": 1775175041,
"narHash": "sha256-lYCPSMIV26VazREzl/TIpbWhBXJ+vJ0EJ+308TrX/6w=",
"owner": "a1994sc",
"repo": "krew2nix",
"rev": "bc779a8cf59ebf76ae60556bfe2d781a0a4cdbd9",
"rev": "15c594042f1ba80ce97ab190a9c684a44c613590",
"type": "github"
},
"original": {
@@ -60,11 +60,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1773389992,
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
"lastModified": 1775036866,
"narHash": "sha256-ZojAnPuCdy657PbTq5V0Y+AHKhZAIwSIT2cb8UgAz/U=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c06b4ae3d6599a672a6210b7021d699c351eebda",
"rev": "6201e203d09599479a3b3450ed24fa81537ebc4e",
"type": "github"
},
"original": {

View File

@@ -6,8 +6,6 @@ let
hvac
librouteros
]);
garm-cli = pkgs.callPackage ./nix/garm-cli.nix { };
in
{
# Overlays - apply krew2nix to get kubectl with krew support
@@ -43,9 +41,18 @@ in
openbao
pv-migrate
mermaid-cli
opencode
garm-cli
(
# Wrapping opencode to set the OPENCODE_ENABLE_EXA environment variable
runCommand "opencode" {
buildInputs = [ makeWrapper ];
} ''
mkdir -p $out/bin
makeWrapper ${pkgs.opencode}/bin/opencode $out/bin/opencode \
--set OPENCODE_ENABLE_EXA "1"
''
)
tea
woodpecker-cli
];
# Scripts

View File

@@ -1,28 +0,0 @@
FROM golang:1.25-alpine AS build
ARG GARM_COMMIT
ARG GARM_PROVIDER_K8S_VERSION=0.3.2
RUN apk add --no-cache ca-certificates git wget tar build-base util-linux-dev linux-headers
WORKDIR /src
RUN git clone https://github.com/cloudbase/garm.git . && git checkout "${GARM_COMMIT}"
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 \
go build -trimpath \
-tags osusergo,netgo,sqlite_omit_load_extension \
-ldflags="-linkmode external -extldflags '-static' -s -w" \
-o /out/garm ./cmd/garm
RUN mkdir -p /out/providers.d \
&& wget -qO /tmp/garm-provider-k8s.tar.gz "https://github.com/mercedes-benz/garm-provider-k8s/releases/download/v${GARM_PROVIDER_K8S_VERSION}/garm-provider-k8s_Linux_x86_64.tar.gz" \
&& tar -xzf /tmp/garm-provider-k8s.tar.gz -C /out/providers.d \
&& chmod 0755 /out/providers.d/garm-provider-k8s
FROM busybox
COPY --from=build /out/garm /bin/garm
COPY --from=build /out/providers.d/garm-provider-k8s /opt/garm/providers.d/garm-provider-k8s
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
ENTRYPOINT ["/bin/garm"]

View File

@@ -0,0 +1,10 @@
<svg xmlns="http://www.w3.org/2000/svg" width="284.538" height="253.96">
<style>
@media (prefers-color-scheme: dark) {
path {
fill: white;
}
}
</style>
<path d="M162.51 33.188c-26.77.411-54.004 6.885-71.494 3.745-1.313-.232-2.124 1.338-1.171 2.265 14.749 14.003 20.335 28.16 36.718 30.065l.476.103c-7.567 7.799-14.028 18.018-18.571 31.171-4.89 14.106-6.268 29.421-7.89 47.105-2.445 26.332-5.173 56.152-20.038 93.54a246.489 246.489 0 0 0-13.27 45.946h22.652a221.202 221.202 0 0 1 11.249-37.786c16.049-40.374 19.073-73.257 21.505-99.693 1.493-16.255 2.806-30.309 6.796-41.853 11.647-33.527 39.408-40.889 61.056-36.693 21.004 4.067 41.673 20.502 40.592 44.016-.772 15.985-7.76 23.166-12.87 28.43-2.793 2.883-5.47 5.611-6.731 9.498-3.037 9.19.101 19.434 8.494 27.568 22.24 20.734 34.338 59.717 33.681 106.513h22.176c.592-52.935-13.951-97.839-40.503-122.626-2.097-2.021-2.69-3.604-3.191-3.347 1.222-1.544 3.217-3.346 4.633-4.813 29.382-21.79 77.813-1.892 107.054 9.653 7.58 2.985 11.274-4.338 4.067-8.623-25.097-14.84-76.54-54.016-105.368-79.718-4.029-3.54-6.796-7.8-11.455-11.738-15.547-27.439-41.84-33.127-68.597-32.728Zm35.238 60.27a15.161 15.161 0 0 0-2.008.232 15.161 15.161 0 0 0-1.506 29.434 15.154 15.154 0 0 0 9.473-28.79 15.161 15.161 0 0 0-5.959-.876zm-44.286 147.17a2.033 2.033 0 0 0-1.133.374c-1.08.772-1.93 3.05-.772 5.701 5.38 12.394 9.1 25.445 12.536 40.413h22.484c-5.676-16.629-16.307-34.055-27.851-43.978-2.008-1.737-3.913-2.574-5.251-2.51z" style="stroke-width:12.8704" transform="translate(-67.27 -33.169)"/>
</svg>

After

Width:  |  Height:  |  Size: 1.5 KiB

View File

@@ -0,0 +1,32 @@
# Roles with needed access for OpenBao's Kubernetes secret engine
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: k8s-full-secrets-abilities
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts", "serviceaccounts/token"]
verbs: ["create", "update", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["rolebindings", "clusterrolebindings"]
verbs: ["create", "update", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "clusterroles"]
verbs: ["bind", "escalate", "create", "update", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: openbao-token-creator-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-full-secrets-abilities
subjects:
- kind: ServiceAccount
name: openbao
namespace: openbao

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: cert-manager-webhook-ovh
version: 0.9.4
version: 0.9.5
sourceRef:
kind: HelmRepository
name: cert-manager-webhook-ovh

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cert-manager
version: v1.20.0
version: v1.20.1
sourceRef:
kind: HelmRepository
name: cert-manager

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cilium
version: 1.19.1
version: 1.19.2
sourceRef:
kind: HelmRepository
name: cilium

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: k8up
version: 4.8.6
version: 4.9.0
sourceRef:
kind: HelmRepository
name: k8up-io

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: ingress-nginx
version: 4.15.0
version: 4.15.1
sourceRef:
kind: HelmRepository
name: ingress-nginx

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openbao
version: 0.25.7
version: 0.26.2
sourceRef:
kind: HelmRepository
name: openbao

View File

@@ -25,3 +25,4 @@ resources:
- configs/openbao-volume.yaml
- controllers/openbao.yaml
- configs/openbao-k8s-se-role.yaml

View File

@@ -1,40 +0,0 @@
{ lib, buildGoModule, fetchFromGitHub, installShellFiles }:
buildGoModule rec {
pname = "garm-cli";
version = "r1380";
garmCommit = "818a9dddccba5f2843f185e6a846770988f31fc5";
src = fetchFromGitHub {
owner = "cloudbase";
repo = "garm";
rev = garmCommit;
hash = "sha256-CTqqabNYUMSrmnQVCWml1/vkDw+OP1uJo1KFhBSZpYY=";
};
subPackages = [ "cmd/garm-cli" ];
nativeBuildInputs = [ installShellFiles ];
vendorHash = null;
ldflags = [
"-s"
"-w"
"-X main.version=${version}"
];
postInstall = ''
installShellCompletion --cmd garm-cli \
--bash <($out/bin/garm-cli completion bash) \
--fish <($out/bin/garm-cli completion fish) \
--zsh <($out/bin/garm-cli completion zsh)
'';
meta = {
description = "CLI for GitHub Actions Runner Manager";
homepage = "https://github.com/cloudbase/garm";
license = lib.licenses.asl20;
mainProgram = "garm-cli";
};
}

View File

@@ -10,57 +10,8 @@
"gotk-components\\.ya?ml$"
]
},
"customManagers": [
{
"customType": "regex",
"description": "Track garm-cli pinned main commit",
"managerFilePatterns": ["^nix/garm-cli\\.nix$"],
"matchStrings": ["garmCommit = \\\"(?<currentValue>[a-f0-9]{40})\\\";"],
"depNameTemplate": "cloudbase/garm",
"datasourceTemplate": "github-refs",
"versioningTemplate": "git"
},
{
"customType": "regex",
"description": "Track garm-provider-k8s release in garm image Dockerfile",
"managerFilePatterns": ["^docker/garm/Dockerfile$"],
"matchStrings": ["ARG GARM_PROVIDER_K8S_VERSION=(?<currentValue>[0-9]+\\.[0-9]+\\.[0-9]+)"],
"depNameTemplate": "mercedes-benz/garm-provider-k8s",
"datasourceTemplate": "github-releases",
"versioningTemplate": "semver"
},
{
"customType": "regex",
"description": "Track pinned garm main commit",
"managerFilePatterns": ["^apps/garm/image-source\\.env$"],
"matchStrings": ["GARM_COMMIT=(?<currentValue>[a-f0-9]{40})"],
"depNameTemplate": "cloudbase/garm",
"datasourceTemplate": "github-refs",
"versioningTemplate": "git"
}
],
"prHourlyLimit": 9,
"packageRules": [
{
"matchManagers": ["custom.regex"],
"matchDepNames": ["cloudbase/garm"],
"matchFileNames": ["nix/garm-cli.nix"],
"postUpgradeTasks": {
"commands": ["node utils/update-garm-cli-hash.mjs"],
"fileFilters": ["nix/garm-cli.nix"],
"executionMode": "update"
}
},
{
"matchManagers": ["custom.regex"],
"matchDepNames": ["cloudbase/garm"],
"matchFileNames": ["apps/garm/image-source.env"],
"postUpgradeTasks": {
"commands": ["node utils/update-garm-image-pin.mjs"],
"fileFilters": ["apps/garm/image-source.env", "apps/garm/deployment.yaml"],
"executionMode": "update"
}
},
{
"matchDatasources": ["docker"],
"matchPackageNames": ["ghcr.io/mostlygeek/llama-swap"],

View File

@@ -2,6 +2,7 @@
import argparse
import os
import pathlib
from typing import Any, cast
import hvac
@@ -42,7 +43,7 @@ def synchronize_auth_kubernetes_config(client: hvac.Client):
def synchronize_kubernetes_roles(client: hvac.Client):
kubernetes = Kubernetes(client.adapter)
policy_dir = os.path.join(os.path.dirname(__file__), '../vault/kubernetes-roles/')
policy_dir = os.path.join(os.path.dirname(__file__), '../vault/kubernetes-auth-roles/')
roles: dict[str, Any] = {} # pyright:ignore[reportExplicitAny]
for filename in os.listdir(policy_dir):
@@ -67,6 +68,69 @@ def synchronize_kubernetes_roles(client: hvac.Client):
# Using write data instead of kubernetes.create_role, we can pass raw yaml
_ = client.write_data(f'/auth/kubernetes/role/{role_name}', data=role_content) # pyright:ignore[reportAny]
def synchronize_approle_auth(client: hvac.Client):
    """Mirror the role definitions under vault/approles/ into Vault's AppRole auth method.

    Enables the AppRole auth method if it is not mounted yet, deletes Vault
    roles that no longer have a local definition file, then (re)writes every
    locally defined role.
    """
    # Mount the auth method only on first run.
    if client.sys.list_auth_methods().get('approle/') is None:
        print('Enabling AppRole auth method')
        client.sys.enable_auth_method('approle', 'AppRole authorization for CI')

    # Load every local role definition, keyed by its file stem.
    roles_dir = pathlib.Path(__file__).parent.joinpath('../vault/approles/')
    desired: dict[str, Any] = {}
    for role_file in roles_dir.iterdir():
        with role_file.open('r') as handle:
            loaded = yaml.safe_load(handle.read())
        assert type(loaded) is dict
        desired[role_file.stem] = loaded

    # Remove roles on Vault that have no matching local file.
    existing: list[str] = []
    listing = client.list("auth/approle/roles")
    if listing is not None:
        existing = listing['data']['keys']
    for name in existing:
        if name not in desired:
            print(f'Deleting role: {name}')
            client.delete(f'auth/approle/role/{name}')

    # Create or update every locally defined role.
    for role_name, role_content in desired.items():
        print(f'Updating role: {role_name}')
        client.write_data(f'auth/approle/role/{role_name}', data=role_content)
def synchronize_kubernetes_secretengine(client: hvac.Client):
    """Mirror vault/kubernetes-se-roles/ into Vault's kubernetes secret engine.

    Enables the engine when missing, deletes engine roles with no local
    definition file, then (re)writes every locally defined role.
    """
# Ensure kubernetes secret engine is enabled
if client.sys.list_mounted_secrets_engines().get('kubernetes/') is None:
print('Enabling kubernetes secret engine')
client.sys.enable_secrets_engine('kubernetes', 'kubernetes', 'Cluster access')
# Write empty config (all defaults, working on the same cluster)
# NOTE(review): indentation was lost in this rendering — presumably this
# config write happens only when the engine is first enabled; confirm
# against the original file before refactoring.
client.write('kubernetes/config', None)
# Load role definitions from local YAML files, keyed by file stem.
policy_dir = pathlib.Path(__file__).parent.joinpath('../vault/kubernetes-se-roles/')
roles: dict[str, Any] = {}
for filename in policy_dir.iterdir():
with filename.open('r') as f:
role = yaml.safe_load(f.read())
assert type(role) is dict
# generated_role_rules must be json or yaml formatted string, convert it
if 'generated_role_rules' in role and type(role['generated_role_rules']) is not str:
role['generated_role_rules'] = yaml.safe_dump(role['generated_role_rules'])
roles[filename.stem] = role
# Delete roles on Vault that have no matching local definition.
roles_on_vault: list[str] = []
roles_response = client.list("kubernetes/roles")
if roles_response is not None:
roles_on_vault = roles_response['data']['keys']
for role in roles_on_vault:
if role not in roles:
print(f'Deleting role: {role}')
client.delete(f'kubernetes/roles/{role}')
# Create or update every locally defined role.
for role_name, role_content in roles.items():
print(f'Updating role: {role_name}')
client.write_data(f'kubernetes/roles/{role_name}', data=role_content)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog="synchronizeVault",
@@ -82,5 +146,11 @@ if __name__ == '__main__':
print('Synchronizing kubernetes config')
synchronize_auth_kubernetes_config(client)
print('Synchronizing kubernetes roles')
print('Synchronizing kubernetes auth roles')
synchronize_kubernetes_roles(client)
print('Synchronizing AppRole auth method')
synchronize_approle_auth(client)
print('Synchronizing kubernetes secret engine')
synchronize_kubernetes_secretengine(client)

View File

@@ -1,320 +0,0 @@
import { createHash } from "node:crypto";
import { Buffer } from "node:buffer";
import fs from "node:fs";
import https from "node:https";
import zlib from "node:zlib";
const nixFile = "nix/garm-cli.nix";
// Abort the script: print a fatal error message to stderr and exit with
// status 1. This function never returns.
function die(message) {
console.error(message);
process.exit(1);
}
// Read a file as UTF-8 text; abort the whole script via die() when the
// file cannot be read.
function readText(filePath) {
  let contents;
  try {
    contents = fs.readFileSync(filePath, "utf8");
  } catch {
    die(`Missing ${filePath}`);
  }
  return contents;
}
// Pull the `version = "...";` value out of the nix expression text, or
// abort when no such line exists.
function extractVersion(text) {
  const found = /^\s*version\s*=\s*"([^"]+)";/m.exec(text);
  if (found === null) {
    die(`Unable to extract version from ${nixFile}`);
  }
  return found[1];
}
// Pull the 40-hex-digit `garmCommit = "...";` value out of the nix
// expression text; returns null when no such line exists.
function extractCommit(text) {
  const found = /^\s*garmCommit\s*=\s*"([a-f0-9]{40})";/m.exec(text);
  if (found === null) {
    return null;
  }
  return found[1];
}
/**
 * Feed `value` into `hash` encoded as an 8-byte little-endian unsigned
 * integer, as required by the NAR length fields.
 * @param {import('node:crypto').Hash} hash - Accumulating hash object.
 * @param {number} value - Non-negative integer to encode.
 */
function writeU64LE(hash, value) {
  const bytes = Buffer.alloc(8);
  bytes.writeBigUInt64LE(BigInt(value), 0);
  hash.update(bytes);
}
/**
 * Emit one NAR string into `hash`: a u64 little-endian byte length,
 * the raw bytes, then zero padding up to the next 8-byte boundary.
 * @param {import('node:crypto').Hash} hash - Accumulating hash object.
 * @param {Buffer} data - Raw string payload.
 */
function writeNarString(hash, data) {
  // Length prefix (inlined little-endian u64 encoding).
  const lenBuf = Buffer.alloc(8);
  lenBuf.writeBigUInt64LE(BigInt(data.length), 0);
  hash.update(lenBuf);
  hash.update(data);
  const remainder = data.length % 8;
  if (remainder !== 0) {
    hash.update(Buffer.alloc(8 - remainder));
  }
}
/**
 * Emit a UTF-8 text token into `hash` as a NAR string.
 * @param {import('node:crypto').Hash} hash - Accumulating hash object.
 * @param {string} text - Token text (e.g. "type", "directory").
 */
function writeNarText(hash, text) {
  const encoded = Buffer.from(text, "utf8");
  writeNarString(hash, encoded);
}
/**
 * Decode a NUL/space padded octal tar header field into a number.
 * An all-padding (empty) field decodes to 0.
 * @param {Buffer} field - Raw header field bytes.
 * @returns {number} Decoded value.
 */
function parseOctal(field) {
  const text = field.toString("ascii").replace(/\0.*$/, "").trim();
  return text === "" ? 0 : Number.parseInt(text, 8);
}
/**
 * Decode a single 512-byte ustar header block.
 * @param {Buffer} block - One 512-byte header record.
 * @returns {{name: string, mode: number, size: number, typeflag: string, linkname: string}}
 *   Header fields, with the ustar `prefix` joined onto `name`.
 */
function parseTarHeader(block) {
  // NUL-terminated string fields: cut at the first NUL byte.
  const cstr = (buf) => buf.toString("utf8").replace(/\0.*$/, "");
  const name = cstr(block.subarray(0, 100));
  const mode = parseOctal(block.subarray(100, 108));
  const size = parseOctal(block.subarray(124, 136));
  const flagByte = block[156];
  // A NUL typeflag means "regular file" ("0") per the tar spec.
  const typeflag = flagByte === 0 ? "0" : String.fromCharCode(flagByte);
  const linkname = cstr(block.subarray(157, 257));
  const prefix = cstr(block.subarray(345, 500));
  return {
    name: prefix === "" ? name : `${prefix}/${name}`,
    mode,
    size,
    typeflag,
    linkname,
  };
}
/**
 * Parse a PAX extended-header payload into a key/value map.
 * Each record has the form "<len> <key>=<value>\n", where <len> is the
 * decimal length of the whole record including the length field itself.
 * Malformed records terminate parsing; records without "=" are skipped.
 * @param {Buffer} data - Raw PAX payload.
 * @returns {Object<string, string>} Parsed attributes.
 */
function parsePax(data) {
  const records = {};
  let pos = 0;
  while (pos < data.length) {
    // Locate the space terminating the decimal length field.
    let spaceAt = pos;
    while (spaceAt < data.length && data[spaceAt] !== 0x20) {
      spaceAt += 1;
    }
    if (spaceAt >= data.length) {
      break;
    }
    const recLen = Number.parseInt(data.subarray(pos, spaceAt).toString("utf8"), 10);
    if (!Number.isFinite(recLen) || recLen <= 0) {
      break;
    }
    const body = data.subarray(spaceAt + 1, pos + recLen).toString("utf8");
    const eqAt = body.indexOf("=");
    if (eqAt > 0) {
      // Strip the single trailing newline required by the PAX format.
      records[body.slice(0, eqAt)] = body.slice(eqAt + 1).replace(/\n$/, "");
    }
    pos += recLen;
  }
  return records;
}
// Decompress a .tar.gz buffer and walk its 512-byte records, resolving
// PAX ("x") and GNU long-name ("L"/"K") extension records into the file
// entry that follows them. Returns a flat list of
// { path, typeflag, mode, linkname, data } objects.
function parseTarEntries(archiveBuffer) {
const gz = zlib.gunzipSync(archiveBuffer);
const entries = [];
let i = 0;
// Extension records apply only to the NEXT real entry; these hold their
// pending values until that entry is emitted.
let pendingPax = null;
let longName = null;
let longLink = null;
while (i + 512 <= gz.length) {
const header = gz.subarray(i, i + 512);
i += 512;
// An all-zero block marks the end-of-archive sentinel.
if (header.every((b) => b === 0)) {
break;
}
const h = parseTarHeader(header);
const data = gz.subarray(i, i + h.size);
// File data is padded to a 512-byte boundary within the archive.
const dataPad = (512 - (h.size % 512)) % 512;
i += h.size + dataPad;
// "x": PAX extended header for the next entry.
if (h.typeflag === "x") {
pendingPax = parsePax(data);
continue;
}
// "g": PAX global header — not needed here, skip.
if (h.typeflag === "g") {
continue;
}
// "L": GNU long pathname for the next entry.
if (h.typeflag === "L") {
longName = data.toString("utf8").replace(/\0.*$/, "");
continue;
}
// "K": GNU long linkname for the next entry.
if (h.typeflag === "K") {
longLink = data.toString("utf8").replace(/\0.*$/, "");
continue;
}
// Precedence for names: PAX record, then GNU long record, then the
// (possibly truncated) ustar header field.
const path = pendingPax?.path ?? longName ?? h.name;
const linkpath = pendingPax?.linkpath ?? longLink ?? h.linkname;
entries.push({
path,
typeflag: h.typeflag,
mode: h.mode,
linkname: linkpath,
data,
});
// Extension state is consumed by exactly one entry.
pendingPax = null;
longName = null;
longLink = null;
}
return entries;
}
/**
 * Drop the leading "<topdir>/" component from an archive member path
 * (GitHub tarballs wrap everything in "<repo>-<ref>/").
 * @param {string} path - Raw archive path.
 * @returns {string} Path relative to the top directory, or "" for the
 *   top-level directory entry itself.
 */
function stripTopDir(path) {
  const normalized = path.replace(/^\.?\//, "").replace(/\/$/, "");
  const slashAt = normalized.indexOf("/");
  return slashAt === -1 ? "" : normalized.slice(slashAt + 1);
}
/**
 * Walk — creating directory nodes as needed — the chain described by
 * `relPath` under `root` and return the final directory node.
 * Dies when an existing non-directory node blocks the path.
 * @param {{kind: string, children: Map}} root - Tree root node.
 * @param {string} relPath - Slash-separated relative path ("" → root).
 * @returns {{kind: string, children: Map}} The deepest directory node.
 */
function ensureDir(root, relPath) {
  if (!relPath) {
    return root;
  }
  let node = root;
  for (const segment of relPath.split("/").filter(Boolean)) {
    if (!node.children.has(segment)) {
      node.children.set(segment, { kind: "directory", children: new Map() });
    }
    const next = node.children.get(segment);
    if (next.kind !== "directory") {
      die(`Path conflict while building tree at ${relPath}`);
    }
    node = next;
  }
  return node;
}
/**
 * Build an in-memory tree (directory / symlink / regular nodes) from
 * parsed tar entries, discarding the archive's top-level directory.
 * Entry types other than directory ("5"), symlink ("2") and regular
 * file ("0") are ignored.
 * @param {Array<Object>} entries - Output of parseTarEntries().
 * @returns {{kind: "directory", children: Map}} The tree root.
 */
function buildTree(entries) {
  const root = { kind: "directory", children: new Map() };
  for (const entry of entries) {
    const rel = stripTopDir(entry.path);
    if (!rel) {
      continue; // the wrapping top-level directory itself
    }
    const segments = rel.split("/").filter(Boolean);
    const leaf = segments.pop();
    const parent = ensureDir(root, segments.join("/"));
    switch (entry.typeflag) {
      case "5": {
        const existing = parent.children.get(leaf);
        if (existing === undefined) {
          parent.children.set(leaf, { kind: "directory", children: new Map() });
        } else if (existing.kind !== "directory") {
          die(`Path conflict at ${rel}`);
        }
        break;
      }
      case "2":
        parent.children.set(leaf, { kind: "symlink", target: entry.linkname });
        break;
      case "0":
        parent.children.set(leaf, {
          kind: "regular",
          // Any execute bit in the tar mode marks the file executable.
          executable: (entry.mode & 0o111) !== 0,
          contents: Buffer.from(entry.data),
        });
        break;
      default:
        break; // hardlinks, fifos, devices: not representable in a NAR tree
    }
  }
  return root;
}
/**
 * Byte-wise comparison of two strings' UTF-8 encodings, matching the
 * ordering Nix uses for NAR directory entries.
 * @returns {number} Negative, zero, or positive.
 */
function compareUtf8(a, b) {
  const left = Buffer.from(a, "utf8");
  const right = Buffer.from(b, "utf8");
  return left.compare(right);
}
/**
 * Serialize `node` into `hash` using the Nix NAR token stream
 * (what `nix hash path` hashes). Directory entries are emitted in
 * byte-wise UTF-8 name order; executable regular files carry the
 * "executable"/"" token pair before their contents.
 * @param {import('node:crypto').Hash} hash - Accumulating hash object.
 * @param {Object} node - Tree node produced by buildTree().
 */
function narDump(hash, node) {
  switch (node.kind) {
    case "directory": {
      writeNarText(hash, "(");
      writeNarText(hash, "type");
      writeNarText(hash, "directory");
      for (const name of [...node.children.keys()].sort(compareUtf8)) {
        writeNarText(hash, "entry");
        writeNarText(hash, "(");
        writeNarText(hash, "name");
        writeNarString(hash, Buffer.from(name, "utf8"));
        writeNarText(hash, "node");
        narDump(hash, node.children.get(name));
        writeNarText(hash, ")");
      }
      writeNarText(hash, ")");
      return;
    }
    case "symlink": {
      writeNarText(hash, "(");
      writeNarText(hash, "type");
      writeNarText(hash, "symlink");
      writeNarText(hash, "target");
      writeNarString(hash, Buffer.from(node.target, "utf8"));
      writeNarText(hash, ")");
      return;
    }
    default: {
      // Regular file.
      writeNarText(hash, "(");
      writeNarText(hash, "type");
      writeNarText(hash, "regular");
      if (node.executable) {
        writeNarText(hash, "executable");
        writeNarText(hash, "");
      }
      writeNarText(hash, "contents");
      writeNarString(hash, node.contents);
      writeNarText(hash, ")");
    }
  }
}
// GET `url` over HTTPS and resolve with the complete response body as a
// Buffer. Follows 3xx redirects recursively via the Location header and
// rejects on any non-2xx final status or transport error.
function fetchBuffer(url) {
return new Promise((resolve, reject) => {
https
.get(url, (res) => {
// Redirect: resolve Location relative to the current URL and recurse;
// res.resume() drains the body so the socket is released.
if (res.statusCode && res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) {
const redirectUrl = new URL(res.headers.location, url).toString();
res.resume();
fetchBuffer(redirectUrl).then(resolve, reject);
return;
}
// Anything outside 2xx (or a missing status) is treated as failure.
if (!res.statusCode || res.statusCode < 200 || res.statusCode >= 300) {
reject(new Error(`Failed to fetch ${url}: ${res.statusCode ?? "unknown"}`));
res.resume();
return;
}
// Success: accumulate chunks and concatenate when the stream ends.
const chunks = [];
res.on("data", (chunk) => chunks.push(chunk));
res.on("end", () => resolve(Buffer.concat(chunks)));
})
.on("error", reject);
});
}
/**
 * Download the GitHub source tarball for `ref` (tag or commit) and
 * compute the SRI hash ("sha256-<base64>") of its NAR serialization —
 * the value nix's fetchFromGitHub expects in its `hash` attribute.
 * @param {string} ref - Git ref to fetch from cloudbase/garm.
 * @returns {Promise<string>} The SRI hash string.
 */
async function computeSRIFromGitHubTar(ref) {
  const archive = await fetchBuffer(`https://github.com/cloudbase/garm/archive/${ref}.tar.gz`);
  const root = buildTree(parseTarEntries(archive));
  const hash = createHash("sha256");
  // A NAR stream always opens with the "nix-archive-1" magic token.
  writeNarText(hash, "nix-archive-1");
  narDump(hash, root);
  return `sha256-${hash.digest("base64")}`;
}
/**
 * Replace the sha256 SRI value of the `hash = "...";` attribute in the
 * nix expression text, dying when no such attribute is found.
 * @param {string} text - Contents of the nix file.
 * @param {string} sri - New "sha256-..." value.
 * @returns {string} Updated file contents.
 */
function updateHash(text, sri) {
  const hashAttr = /(^\s*hash\s*=\s*")sha256-[^"]+(";)/m;
  if (!hashAttr.test(text)) {
    die(`Unable to update hash in ${nixFile}`);
  }
  return text.replace(hashAttr, `$1${sri}$2`);
}
// Entry point: read the nix expression, determine the ref to hash
// (pinned commit if present, otherwise the "v<version>" tag), compute
// the NAR SRI hash of that ref's tarball, and rewrite the file's hash.
async function main() {
const text = readText(nixFile);
const version = extractVersion(text);
const commit = extractCommit(text);
// Prefer an explicit commit pin over the version tag.
const ref = commit ?? `v${version}`;
const sri = await computeSRIFromGitHubTar(ref);
const updated = updateHash(text, sri);
fs.writeFileSync(nixFile, updated, "utf8");
console.log(`Updated ${nixFile} hash to ${sri}`);
}
// Any rejection (network, parse, write) aborts with a non-zero exit.
main().catch((err) => die(err.message));

View File

@@ -1,91 +0,0 @@
import fs from "node:fs";
import os from "node:os";
import path from "node:path";
import { execFileSync } from "node:child_process";
// Env file pinning the upstream garm commit and derived image metadata.
const pinFile = "apps/garm/image-source.env";
// Kubernetes deployment whose container image line gets rewritten.
const deploymentFile = "apps/garm/deployment.yaml";
/**
 * Log an error to stderr and abort the process with exit status 1.
 * @param {string} msg - Failure description.
 */
function fail(msg) {
  console.error(msg);
  process.exit(1);
}
/**
 * Parse simple KEY=VALUE lines into a plain object.
 * Blank lines and lines starting with "#" are skipped; lines without
 * an "=" are ignored. Keys and values are whitespace-trimmed.
 * @param {string} content - Raw env-file text.
 * @returns {Object<string, string>} Parsed variables.
 */
function parseEnvFile(content) {
  const result = {};
  for (const rawLine of content.split(/\r?\n/)) {
    if (!rawLine || rawLine.startsWith("#")) {
      continue;
    }
    const sep = rawLine.indexOf("=");
    if (sep === -1) {
      continue;
    }
    result[rawLine.slice(0, sep).trim()] = rawLine.slice(sep + 1).trim();
  }
  return result;
}
/**
 * Set KEY=value in env-file content: replace an existing assignment
 * line in place, or append a new one at the end.
 * Note: `key` is interpolated into a regex, so callers must pass
 * plain identifiers (as this script does).
 * @param {string} content - Current env-file text.
 * @param {string} key - Variable name.
 * @param {string} value - New value.
 * @returns {string} Updated env-file text.
 */
function updateOrAdd(content, key, value) {
  const line = `${key}=${value}`;
  const existing = new RegExp(`^${key}=.*$`, "m");
  if (existing.test(content)) {
    return content.replace(existing, line);
  }
  return `${content.trimEnd()}\n${line}\n`;
}
/**
 * Run git with the given arguments and return its trimmed stdout.
 * @param {string[]} args - git CLI arguments.
 * @param {Object} [options] - execFileSync overrides (may replace encoding).
 * @returns {string} Trimmed standard output.
 */
function gitOut(args, options = {}) {
  const stdout = execFileSync("git", args, { encoding: "utf8", ...options });
  return stdout.trim();
}
/**
 * Run git for its side effects only; output handling is controlled by
 * the caller-supplied execFileSync options.
 * @param {string[]} args - git CLI arguments.
 * @param {Object} [options] - execFileSync options.
 */
function gitRun(args, options = {}) {
  execFileSync("git", args, options);
}
// Main script body: resolve the pinned garm commit to a monotonically
// increasing commit count ("r<N>" tag), then write the derived image
// reference into both the env pin file and the deployment manifest.
const pinContent = fs.readFileSync(pinFile, "utf8");
const vars = parseEnvFile(pinContent);
const commit = vars.GARM_COMMIT;
const imageRepo = vars.GARM_IMAGE_REPO || "gitea.lumpiasty.xyz/lumpiasty/garm-k8s";
// The pin must be a full 40-character lowercase sha.
if (!commit || !/^[0-9a-f]{40}$/.test(commit)) {
fail(`Invalid or missing GARM_COMMIT in ${pinFile}`);
}
// Clone upstream (blobless, for speed) just to count commits up to the pin;
// the temp clone is removed even when the git commands fail.
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "garm-main-"));
let commitNumber;
try {
gitRun(["clone", "--filter=blob:none", "https://github.com/cloudbase/garm.git", tmpDir], {
stdio: "ignore",
});
commitNumber = gitOut(["-C", tmpDir, "rev-list", "--count", commit]);
} finally {
fs.rmSync(tmpDir, { recursive: true, force: true });
}
if (!/^\d+$/.test(commitNumber)) {
fail(`Unable to resolve commit number for ${commit}`);
}
// Image tag scheme: r<commit-count>, e.g. garm-k8s:r1234.
const image = `${imageRepo}:r${commitNumber}`;
let nextPin = pinContent;
nextPin = updateOrAdd(nextPin, "GARM_COMMIT_NUMBER", commitNumber);
nextPin = updateOrAdd(nextPin, "GARM_IMAGE_REPO", imageRepo);
nextPin = updateOrAdd(nextPin, "GARM_IMAGE", image);
fs.writeFileSync(pinFile, nextPin, "utf8");
const deployment = fs.readFileSync(deploymentFile, "utf8");
// Matches either the upstream ghcr image or a previously pinned local image.
const imagePattern = /image:\s*(?:ghcr\.io\/cloudbase\/garm:[^\s]+|gitea\.lumpiasty\.xyz\/(?:Lumpiasty|lumpiasty)\/garm(?:-k8s)?:[^\s]+)/;
if (!imagePattern.test(deployment)) {
fail(`Unable to update garm image in ${deploymentFile}`);
}
const updatedDeployment = deployment.replace(imagePattern, `image: ${image}`);
fs.writeFileSync(deploymentFile, updatedDeployment, "utf8");
console.log(`Pinned garm image to ${image}`);

View File

@@ -0,0 +1,4 @@
token_ttl: 20m
token_max_ttl: 20m
policies:
- flux-reconcile

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- authentik-secret
bound_service_account_namespaces:
- authentik
token_policies:
- authentik

View File

@@ -1,6 +1,6 @@
bound_service_account_names:
- garm
- crawl4ai-secret
bound_service_account_namespaces:
- garm
- crawl4ai
token_policies:
- garm
- crawl4ai

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- openwebui-secret
bound_service_account_namespaces:
- openwebui
token_policies:
- openwebui

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- woodpecker-secret
bound_service_account_namespaces:
- woodpecker
token_policies:
- woodpecker

View File

@@ -0,0 +1,6 @@
allowed_kubernetes_namespaces: flux-system
generated_role_rules:
rules:
- apiGroups: ["source.toolkit.fluxcd.io"]
resources: ["gitrepositories"]
verbs: ["get", "patch", "watch"]

View File

@@ -0,0 +1,3 @@
path "secret/data/authentik" {
capabilities = ["read"]
}

View File

@@ -0,0 +1,3 @@
path "secret/data/crawl4ai" {
capabilities = ["read"]
}

View File

@@ -0,0 +1,3 @@
path "kubernetes/creds/flux-reconcile" {
capabilities = ["update"]
}

View File

@@ -1,7 +0,0 @@
path "secret/data/garm" {
capabilities = ["read"]
}
path "secret/data/backblaze" {
capabilities = ["read"]
}

View File

@@ -0,0 +1,3 @@
path "secret/data/authentik/openwebui" {
capabilities = ["read"]
}

View File

@@ -0,0 +1,7 @@
path "secret/data/woodpecker" {
capabilities = ["read"]
}
path "secret/data/container-registry" {
capabilities = ["read"]
}