Compare commits
34 Commits
aa57015749
...
fresh-start
| Author | SHA1 | Date | |
|---|---|---|---|
| 9b8434dc8e | |||
| e90a1807ea | |||
| 08a423d9b0 | |||
| 817cdd2ec7 | |||
| a0814e76ee | |||
| da163398a5 | |||
| 8160a52176 | |||
| ad3b2229c2 | |||
| 57c2c7ea8d | |||
| f2d60e0b15 | |||
| 9d5dd332fc | |||
| e923fc3c30 | |||
| 1945f2a9bc | |||
| fdd6755c2f | |||
| 3d85148c5a | |||
| ab5a551124 | |||
| 1bb357b3c8 | |||
| 6a0b544bad | |||
| 4e30c9b94d | |||
| dfafadb4e3 | |||
| ae42e342ca | |||
| 670312d75b | |||
| 0ce1a797fc | |||
| 3d53b4b10b | |||
| 98f63b1576 | |||
| edba33b552 | |||
| 054df42d8b | |||
| 08db022d0d | |||
| e485a4fc7f | |||
| 9e74ed6a19 | |||
| 42e89c9bb7 | |||
| 99bc04b76a | |||
| 7ee77e33d4 | |||
| 8bdd5f2196 |
49
.woodpecker/flux-reconcile-source.yaml
Normal file
49
.woodpecker/flux-reconcile-source.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
when:
|
||||
- event: push
|
||||
branch: fresh-start
|
||||
|
||||
skip_clone: true
|
||||
|
||||
steps:
|
||||
- name: Get kubernetes access from OpenBao
|
||||
image: quay.io/openbao/openbao:2.5.2
|
||||
environment:
|
||||
VAULT_ADDR: https://openbao.lumpiasty.xyz:8200
|
||||
ROLE_ID:
|
||||
from_secret: flux_reconcile_role_id
|
||||
SECRET_ID:
|
||||
from_secret: flux_reconcile_secret_id
|
||||
commands:
|
||||
- bao write -field token auth/approle/login
|
||||
role_id=$ROLE_ID
|
||||
secret_id=$SECRET_ID > /woodpecker/.vault_id
|
||||
- export VAULT_TOKEN=$(cat /woodpecker/.vault_id)
|
||||
- bao write -format json -f /kubernetes/creds/flux-reconcile > /woodpecker/kube_credentials
|
||||
- name: Construct Kubeconfig
|
||||
image: alpine/k8s:1.32.13
|
||||
environment:
|
||||
KUBECONFIG: /woodpecker/kubeconfig
|
||||
commands:
|
||||
- kubectl config set-cluster cluster
|
||||
--server=https://$KUBERNETES_SERVICE_HOST
|
||||
--certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
- kubectl config set-credentials cluster
|
||||
--token=$(jq -r .data.service_account_token /woodpecker/kube_credentials)
|
||||
- kubectl config set-context cluster
|
||||
--cluster cluster
|
||||
--user cluster
|
||||
--namespace flux-system
|
||||
- kubectl config use-context cluster
|
||||
- name: Reconcile git source
|
||||
image: ghcr.io/fluxcd/flux-cli:v2.8.3
|
||||
environment:
|
||||
KUBECONFIG: /woodpecker/kubeconfig
|
||||
commands:
|
||||
- flux reconcile source git flux-system
|
||||
- name: Invalidate OpenBao token
|
||||
image: quay.io/openbao/openbao:2.5.2
|
||||
environment:
|
||||
VAULT_ADDR: https://openbao.lumpiasty.xyz:8200
|
||||
commands:
|
||||
- export VAULT_TOKEN=$(cat /woodpecker/.vault_id)
|
||||
- bao write -f auth/token/revoke-self
|
||||
18
Makefile
18
Makefile
@@ -1,6 +1,6 @@
|
||||
SHELL := /usr/bin/env bash
|
||||
|
||||
.PHONY: install-router gen-talos-config apply-talos-config get-kubeconfig garm-image-build garm-image-push garm-image-build-push
|
||||
.PHONY: install-router gen-talos-config apply-talos-config get-kubeconfig
|
||||
|
||||
install-router:
|
||||
ansible-playbook ansible/playbook.yml -i ansible/hosts
|
||||
@@ -27,19 +27,3 @@ apply-talos-config:
|
||||
|
||||
get-kubeconfig:
|
||||
talosctl -n anapistula-delrosalae kubeconfig talos/generated/kubeconfig
|
||||
|
||||
garm-image-build:
|
||||
set -euo pipefail; \
|
||||
source apps/garm/image-source.env; \
|
||||
docker build \
|
||||
-f docker/garm/Dockerfile \
|
||||
--build-arg GARM_COMMIT=$$GARM_COMMIT \
|
||||
-t $$GARM_IMAGE \
|
||||
.
|
||||
|
||||
garm-image-push:
|
||||
set -euo pipefail; \
|
||||
source apps/garm/image-source.env; \
|
||||
docker push $$GARM_IMAGE
|
||||
|
||||
garm-image-build-push: garm-image-build garm-image-push
|
||||
|
||||
16
README.md
16
README.md
@@ -141,7 +141,7 @@ Currently the k8s cluster consists of single node (hostname anapistula-delrosala
|
||||
|
||||
## Software stack
|
||||
|
||||
The cluster itself is based on [Talos Linux](https://www.talos.dev/) (which is also a Kubernetes distribution) and uses [Cilium](https://cilium.io/) as CNI, IPAM, kube-proxy replacement, Load Balancer, and BGP control plane. Persistent volumes are managed by [OpenEBS LVM LocalPV](https://openebs.io/docs/user-guides/local-storage-user-guide/local-pv-lvm/lvm-overview). Applications are deployed using GitOps (this repo) and reconciled on cluster using [Flux](https://fluxcd.io/). Git repository is hosted on [Gitea](https://gitea.io/) running on a cluster itself. Secets are kept in [OpenBao](https://openbao.org/) (HashiCorp Vault fork) running on a cluster and synced to cluster objects using [Vault Secrets Operator](https://github.com/hashicorp/vault-secrets-operator). Deployments are kept up to date using self hosted [Renovate](https://www.mend.io/renovate/) bot updating manifests in the Git repository. Incoming HTTP traffic is routed to cluster using [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) and certificates are issued by [cert-manager](https://cert-manager.io/) with [Let's Encrypt](https://letsencrypt.org/) ACME issuer with [cert-manager-webhook-ovh](https://github.com/aureq/cert-manager-webhook-ovh) resolving DNS-01 challanges. Cluster also runs [CloudNativePG](https://cloudnative-pg.io/) operator for managing PostgreSQL databases. Router is running [Mikrotik RouterOS](https://help.mikrotik.com/docs/spaces/ROS/pages/328059/RouterOS) and its configuration is managed via [Ansible](https://docs.ansible.com/) playbook in this repo. High level core cluster software architecture is shown on the diagram below.
|
||||
The cluster itself is based on [Talos Linux](https://www.talos.dev/) (which is also a Kubernetes distribution) and uses [Cilium](https://cilium.io/) as CNI, IPAM, kube-proxy replacement, Load Balancer, and BGP control plane. Persistent volumes are managed by [OpenEBS LVM LocalPV](https://openebs.io/docs/user-guides/local-storage-user-guide/local-pv-lvm/lvm-overview). Applications are deployed using GitOps (this repo) and reconciled on cluster using [Flux](https://fluxcd.io/). Git repository is hosted on [Gitea](https://gitea.io/) running on a cluster itself. Secrets are kept in [OpenBao](https://openbao.org/) (HashiCorp Vault fork) running on a cluster and synced to cluster objects using [Vault Secrets Operator](https://github.com/hashicorp/vault-secrets-operator). Deployments are kept up to date using self hosted [Renovate](https://www.mend.io/renovate/) bot updating manifests in the Git repository. There is a [Woodpecker](https://woodpecker-ci.org/) instance watching repositories on Gitea and scheduling jobs on cluster. Incoming HTTP traffic is routed to cluster using [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) and certificates are issued by [cert-manager](https://cert-manager.io/) with [Let's Encrypt](https://letsencrypt.org/) ACME issuer with [cert-manager-webhook-ovh](https://github.com/aureq/cert-manager-webhook-ovh) resolving DNS-01 challenges. Cluster also runs [CloudNativePG](https://cloudnative-pg.io/) operator for managing PostgreSQL databases. Router is running [Mikrotik RouterOS](https://help.mikrotik.com/docs/spaces/ROS/pages/328059/RouterOS) and its configuration is managed via [Ansible](https://docs.ansible.com/) playbook in this repo. High level core cluster software architecture is shown on the diagram below.
|
||||
|
||||
> Talos Linux is an immutable Linux distribution purpose-built for running Kubernetes. The OS is distributed as an OCI (Docker) image and does not contain any package manager, shell, SSH, or any other tools for managing the system. Instead, all operations are performed using API, which can be accessed using `talosctl` CLI tool.
|
||||
|
||||
@@ -177,14 +177,23 @@ flowchart TD
|
||||
vault_operator -- "Retrieves secrets" --> vault[OpenBao] -- "Secret storage" --> lv
|
||||
vault -- "Auth method" --> kubeapi
|
||||
|
||||
gitea -- "Receives events" --> woodpecker[Woodpecker CI] -- "Schedules jobs" --> kubeapi
|
||||
|
||||
gitea -- "Stores repositories" --> lv
|
||||
|
||||
gitea --> renovate[Renovate Bot] -- "Updates manifests" --> gitea
|
||||
|
||||
gitea--> renovate[Renovate Bot] -- "Updates manifests" --> gitea
|
||||
|
||||
end
|
||||
```
|
||||
|
||||
### Reconciliation paths of each component
|
||||
|
||||
- Kubernetes manifests are reconciled using Flux triggered by Woodpecker CI on push
|
||||
- RouterOS configs are applied by Ansible <!-- ran by Gitea Action on push -->
|
||||
- Talos configs are applied using makefile <!-- switch to ansible and trigger on action push -->
|
||||
- Vault policies are applied by running `synchronize-vault.py` <!-- triggered by Gitea action on push -->
|
||||
<!-- - Docker images are built and pushed to registry by Gitea Actions on push -->
|
||||
|
||||
<!-- TODO: Backups, monitoring, logging, deployment with ansible etc -->
|
||||
|
||||
## Software
|
||||
@@ -228,6 +237,7 @@ flowchart TD
|
||||
|------|------|-------------|
|
||||
| <img src="docs/assets/devenv.svg" alt="devenv" height="50" width="50"> | devenv | Tool for declarative management of development environment using Nix |
|
||||
| <img src="docs/assets/renovate.svg" alt="Renovate" height="50" width="50"> | Renovate | Bot for keeping dependencies up to date |
|
||||
| <img src="docs/assets/woodpecker.svg" alt="Woodpecker" height="50" width="50"> | Woodpecker CI | Continuous Integration system |
|
||||
|
||||
### AI infrastructure
|
||||
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
# garm
|
||||
|
||||
This app deploys `garm` with external `garm-provider-k8s`.
|
||||
|
||||
- API/UI ingress: `https://garm.lumpiasty.xyz`
|
||||
- Internal service DNS: `http://garm.garm.svc.cluster.local:9997`
|
||||
|
||||
## Vault secret requirements
|
||||
|
||||
`VaultStaticSecret` reads `secret/data/garm` and expects at least:
|
||||
|
||||
- `jwt_auth_secret`
|
||||
- `database_passphrase` (must be 32 characters)
|
||||
|
||||
## Connect garm to Gitea
|
||||
|
||||
After Flux reconciles this app, initialize garm and add Gitea endpoint/credentials.
|
||||
|
||||
```bash
|
||||
# 1) Initialize garm (from your local devenv shell)
|
||||
garm-cli init \
|
||||
--name homelab \
|
||||
--url https://garm.lumpiasty.xyz \
|
||||
--username admin \
|
||||
--email admin@lumpiasty.xyz \
|
||||
--password '<STRONG_ADMIN_PASSWORD>' \
|
||||
--metadata-url http://garm.garm.svc.cluster.local:9997/api/v1/metadata \
|
||||
--callback-url http://garm.garm.svc.cluster.local:9997/api/v1/callbacks \
|
||||
--webhook-url http://garm.garm.svc.cluster.local:9997/webhooks
|
||||
|
||||
# 2) Add Gitea endpoint
|
||||
garm-cli gitea endpoint create \
|
||||
--name local-gitea \
|
||||
--description 'Cluster Gitea' \
|
||||
--base-url http://gitea-http.gitea.svc.cluster.local:80 \
|
||||
--api-base-url http://gitea-http.gitea.svc.cluster.local:80/api/v1
|
||||
|
||||
# 3) Add Gitea PAT credentials
|
||||
garm-cli gitea credentials add \
|
||||
--name gitea-pat \
|
||||
--description 'PAT for garm' \
|
||||
--endpoint local-gitea \
|
||||
--auth-type pat \
|
||||
--pat-oauth-token '<GITEA_PAT_WITH_write:repository,write:organization>'
|
||||
```
|
||||
|
||||
Then add repositories/orgs and create pools against provider `kubernetes_external`.
|
||||
|
||||
If Gitea refuses webhook installation to cluster-local URLs, set `gitea.config.webhook.ALLOWED_HOST_LIST` in `apps/gitea/release.yaml`.
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: garm-provider-k8s-config
|
||||
namespace: garm
|
||||
data:
|
||||
provider-config.yaml: |
|
||||
kubeConfigPath: ""
|
||||
runnerNamespace: "garm-runners"
|
||||
podTemplate:
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
flavors:
|
||||
default:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 512Mi
|
||||
limits:
|
||||
memory: 2Gi
|
||||
@@ -1,106 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: garm
|
||||
namespace: garm
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: garm
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: garm
|
||||
spec:
|
||||
serviceAccountName: garm
|
||||
initContainers:
|
||||
- name: render-garm-config
|
||||
image: alpine:3.23
|
||||
env:
|
||||
- name: JWT_AUTH_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: garm-config
|
||||
key: jwt_auth_secret
|
||||
- name: DATABASE_PASSPHRASE
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: garm-config
|
||||
key: database_passphrase
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
cat <<EOF > /etc/garm/config.toml
|
||||
[default]
|
||||
enable_webhook_management = true
|
||||
|
||||
[logging]
|
||||
enable_log_streamer = true
|
||||
log_format = "text"
|
||||
log_level = "info"
|
||||
log_source = false
|
||||
|
||||
[metrics]
|
||||
enable = true
|
||||
disable_auth = false
|
||||
|
||||
[jwt_auth]
|
||||
secret = "${JWT_AUTH_SECRET}"
|
||||
time_to_live = "8760h"
|
||||
|
||||
[apiserver]
|
||||
bind = "0.0.0.0"
|
||||
port = 9997
|
||||
use_tls = false
|
||||
[apiserver.webui]
|
||||
enable = true
|
||||
|
||||
[database]
|
||||
backend = "sqlite3"
|
||||
passphrase = "${DATABASE_PASSPHRASE}"
|
||||
[database.sqlite3]
|
||||
db_file = "/data/garm.db"
|
||||
busy_timeout_seconds = 5
|
||||
|
||||
[[provider]]
|
||||
name = "kubernetes_external"
|
||||
description = "Kubernetes provider"
|
||||
provider_type = "external"
|
||||
[provider.external]
|
||||
config_file = "/etc/garm/provider-config.yaml"
|
||||
provider_executable = "/opt/garm/providers.d/garm-provider-k8s"
|
||||
environment_variables = ["KUBERNETES_"]
|
||||
EOF
|
||||
volumeMounts:
|
||||
- name: config-dir
|
||||
mountPath: /etc/garm
|
||||
containers:
|
||||
- name: garm
|
||||
image: gitea.lumpiasty.xyz/lumpiasty/garm-k8s:r1380
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/garm
|
||||
- --config
|
||||
- /etc/garm/config.toml
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9997
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
- name: config-dir
|
||||
mountPath: /etc/garm
|
||||
- name: provider-config
|
||||
mountPath: /etc/garm/provider-config.yaml
|
||||
subPath: provider-config.yaml
|
||||
volumes:
|
||||
- name: data
|
||||
persistentVolumeClaim:
|
||||
claimName: garm-lvmhdd
|
||||
- name: config-dir
|
||||
emptyDir: {}
|
||||
- name: provider-config
|
||||
configMap:
|
||||
name: garm-provider-k8s-config
|
||||
@@ -1,5 +0,0 @@
|
||||
# renovate: datasource=github-refs depName=cloudbase/garm versioning=git
|
||||
GARM_COMMIT=818a9dddccba5f2843f185e6a846770988f31fc5
|
||||
GARM_COMMIT_NUMBER=1380
|
||||
GARM_IMAGE_REPO=gitea.lumpiasty.xyz/lumpiasty/garm-k8s
|
||||
GARM_IMAGE=gitea.lumpiasty.xyz/lumpiasty/garm-k8s:r1380
|
||||
@@ -1,24 +0,0 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
namespace: garm
|
||||
name: garm
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: letsencrypt
|
||||
spec:
|
||||
ingressClassName: nginx-ingress
|
||||
rules:
|
||||
- host: garm.lumpiasty.xyz
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
service:
|
||||
name: garm
|
||||
port:
|
||||
number: 9997
|
||||
path: /
|
||||
pathType: Prefix
|
||||
tls:
|
||||
- hosts:
|
||||
- garm.lumpiasty.xyz
|
||||
secretName: garm-ingress
|
||||
@@ -1,9 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: garm
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: garm-runners
|
||||
@@ -1,51 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: garm
|
||||
namespace: garm
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: garm-provider-k8s
|
||||
namespace: garm-runners
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods", "pods/log", "configmaps", "secrets", "events"]
|
||||
verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: garm-provider-k8s
|
||||
namespace: garm-runners
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: garm
|
||||
namespace: garm
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: garm-provider-k8s
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: garm-namespace-manager
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: garm-namespace-manager
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: garm
|
||||
namespace: garm
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: garm-namespace-manager
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -1,32 +0,0 @@
|
||||
---
|
||||
apiVersion: secrets.hashicorp.com/v1beta1
|
||||
kind: VaultAuth
|
||||
metadata:
|
||||
name: garm
|
||||
namespace: garm
|
||||
spec:
|
||||
method: kubernetes
|
||||
mount: kubernetes
|
||||
kubernetes:
|
||||
role: garm
|
||||
serviceAccount: garm
|
||||
---
|
||||
apiVersion: secrets.hashicorp.com/v1beta1
|
||||
kind: VaultStaticSecret
|
||||
metadata:
|
||||
name: garm-config
|
||||
namespace: garm
|
||||
spec:
|
||||
type: kv-v2
|
||||
|
||||
mount: secret
|
||||
path: garm
|
||||
|
||||
destination:
|
||||
create: true
|
||||
name: garm-config
|
||||
type: Opaque
|
||||
transformation:
|
||||
excludeRaw: true
|
||||
|
||||
vaultAuthRef: garm
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: garm
|
||||
namespace: garm
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: garm
|
||||
ports:
|
||||
- name: http
|
||||
port: 9997
|
||||
targetPort: 9997
|
||||
protocol: TCP
|
||||
@@ -73,7 +73,7 @@ spec:
|
||||
ISSUE_INDEXER_TYPE: bleve
|
||||
REPO_INDEXER_ENABLED: true
|
||||
webhook:
|
||||
ALLOWED_HOST_LIST: garm.garm.svc.cluster.local
|
||||
ALLOWED_HOST_LIST: woodpecker.lumpiasty.xyz
|
||||
admin:
|
||||
username: GiteaAdmin
|
||||
email: gi@tea.com
|
||||
|
||||
@@ -18,7 +18,7 @@ spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: immich
|
||||
version: 1.2.1
|
||||
version: 1.2.2
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: secustor
|
||||
|
||||
@@ -14,4 +14,4 @@ resources:
|
||||
- searxng
|
||||
- ispeak3
|
||||
- openwebui
|
||||
- garm
|
||||
- woodpecker
|
||||
|
||||
@@ -4,25 +4,19 @@ logToStdout: "both" # proxy and upstream
|
||||
|
||||
macros:
|
||||
base_args: "--no-warmup --port ${PORT}"
|
||||
common_args: "--fit-target 1024 --no-warmup --port ${PORT}"
|
||||
gemma3_ctx_128k: "--ctx-size 131072"
|
||||
qwen35_ctx_256k: "--ctx-size 262144"
|
||||
common_args: "--fit-target 1536 --no-warmup --port ${PORT}"
|
||||
ctx_128k: "--ctx-size 131072"
|
||||
ctx_256k: "--ctx-size 262144"
|
||||
gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
|
||||
qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0"
|
||||
qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q8_0 -ctv q8_0"
|
||||
qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf"
|
||||
qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf"
|
||||
glm47_flash_args: "--temp 0.7 --top-p 1.0 --min-p 0.01 --repeat-penalty 1.0"
|
||||
gemma4_sampling: "--temp 1.0 --top-p 0.95 --top-k 64"
|
||||
thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'"
|
||||
thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'"
|
||||
|
||||
peers:
|
||||
openrouter:
|
||||
proxy: https://openrouter.ai/api
|
||||
apiKey: ${env.OPENROUTER_API_KEY}
|
||||
models:
|
||||
- z-ai/glm-5
|
||||
|
||||
hooks:
|
||||
on_startup:
|
||||
preload:
|
||||
@@ -41,7 +35,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
|
||||
${gemma3_ctx_128k}
|
||||
${ctx_128k}
|
||||
${gemma_sampling}
|
||||
${common_args}
|
||||
|
||||
@@ -49,7 +43,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
|
||||
${gemma3_ctx_128k}
|
||||
${ctx_128k}
|
||||
${gemma_sampling}
|
||||
--no-mmproj
|
||||
${common_args}
|
||||
@@ -58,7 +52,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
|
||||
${gemma3_ctx_128k}
|
||||
${ctx_128k}
|
||||
${gemma_sampling}
|
||||
${common_args}
|
||||
|
||||
@@ -66,7 +60,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
|
||||
${gemma3_ctx_128k}
|
||||
${ctx_128k}
|
||||
${gemma_sampling}
|
||||
--no-mmproj
|
||||
${common_args}
|
||||
@@ -89,7 +83,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_35b_args}
|
||||
${common_args}
|
||||
|
||||
@@ -97,7 +91,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_35b_args}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -109,7 +103,7 @@ models:
|
||||
/app/llama-server
|
||||
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
|
||||
${qwen35_35b_heretic_mmproj}
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_35b_args}
|
||||
${common_args}
|
||||
|
||||
@@ -118,7 +112,7 @@ models:
|
||||
/app/llama-server
|
||||
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
|
||||
${qwen35_35b_heretic_mmproj}
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_35b_args}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -127,7 +121,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${base_args}
|
||||
${thinking_on}
|
||||
@@ -145,7 +139,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_on}
|
||||
@@ -154,7 +148,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -163,7 +157,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_128k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_on}
|
||||
@@ -172,7 +166,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_128k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -182,7 +176,7 @@ models:
|
||||
/app/llama-server
|
||||
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
|
||||
${qwen35_4b_heretic_mmproj}
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_128k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_on}
|
||||
@@ -192,7 +186,7 @@ models:
|
||||
/app/llama-server
|
||||
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
|
||||
${qwen35_4b_heretic_mmproj}
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_128k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -201,7 +195,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_on}
|
||||
@@ -210,7 +204,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -219,7 +213,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_on}
|
||||
@@ -228,7 +222,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -237,7 +231,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_on}
|
||||
@@ -246,7 +240,7 @@ models:
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
|
||||
${qwen35_ctx_256k}
|
||||
${ctx_256k}
|
||||
${qwen35_sampling}
|
||||
${common_args}
|
||||
${thinking_off}
|
||||
@@ -257,3 +251,35 @@ models:
|
||||
-hf unsloth/GLM-4.7-Flash-GGUF:Q4_K_M
|
||||
${glm47_flash_args}
|
||||
${common_args}
|
||||
|
||||
"gemma-4-26B-A4B-it:UD-Q4_K_XL":
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q4_K_XL \
|
||||
${ctx_256k}
|
||||
${gemma4_sampling}
|
||||
${common_args}
|
||||
|
||||
"gemma-4-26B-A4B-it:UD-Q2_K_XL":
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q2_K_XL \
|
||||
${ctx_256k}
|
||||
${gemma4_sampling}
|
||||
${common_args}
|
||||
|
||||
"unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL":
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL \
|
||||
${ctx_128k}
|
||||
${gemma4_sampling}
|
||||
${common_args}
|
||||
|
||||
"unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL":
|
||||
cmd: |
|
||||
/app/llama-server
|
||||
-hf unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL \
|
||||
${ctx_128k}
|
||||
${gemma4_sampling}
|
||||
${common_args}
|
||||
|
||||
@@ -18,7 +18,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: llama-swap
|
||||
image: ghcr.io/mostlygeek/llama-swap:v199-vulkan-b8547
|
||||
image: ghcr.io/mostlygeek/llama-swap:v199-vulkan-b8643
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /app/llama-swap
|
||||
@@ -29,12 +29,6 @@ spec:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: OPENROUTER_API_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: llama-openrouter
|
||||
key: OPENROUTER_API_KEY
|
||||
volumeMounts:
|
||||
- name: models
|
||||
mountPath: /root/.cache
|
||||
|
||||
@@ -7,7 +7,7 @@ metadata:
|
||||
name: llama-models-lvmssd
|
||||
namespace: openebs
|
||||
spec:
|
||||
capacity: 200Gi
|
||||
capacity: "322122547200"
|
||||
ownerNodeID: anapistula-delrosalae
|
||||
shared: "yes"
|
||||
thinProvision: "no"
|
||||
@@ -20,7 +20,7 @@ metadata:
|
||||
name: llama-models-lvmssd
|
||||
spec:
|
||||
capacity:
|
||||
storage: 200Gi
|
||||
storage: 300Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
@@ -41,6 +41,6 @@ spec:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 200Gi
|
||||
storage: 300Gi
|
||||
storageClassName: ssd-lvmpv
|
||||
volumeName: llama-models-lvmssd
|
||||
@@ -36,26 +36,3 @@ spec:
|
||||
excludeRaw: true
|
||||
|
||||
vaultAuthRef: llama
|
||||
---
|
||||
apiVersion: secrets.hashicorp.com/v1beta1
|
||||
kind: VaultStaticSecret
|
||||
metadata:
|
||||
name: llama-openrouter
|
||||
namespace: llama
|
||||
spec:
|
||||
type: kv-v2
|
||||
|
||||
mount: secret
|
||||
path: openrouter
|
||||
|
||||
destination:
|
||||
create: true
|
||||
name: llama-openrouter
|
||||
type: Opaque
|
||||
transformation:
|
||||
excludeRaw: true
|
||||
templates:
|
||||
OPENROUTER_API_KEY:
|
||||
text: '{{ get .Secrets "API_KEY" }}'
|
||||
|
||||
vaultAuthRef: llama
|
||||
|
||||
@@ -9,4 +9,3 @@ data:
|
||||
RENOVATE_ENDPOINT: https://gitea.lumpiasty.xyz/api/v1
|
||||
RENOVATE_PLATFORM: gitea
|
||||
RENOVATE_GIT_AUTHOR: Renovate Bot <renovate@lumpiasty.xyz>
|
||||
RENOVATE_ALLOWED_COMMANDS: '["^node utils/update-garm-cli-hash\\.mjs$", "^node utils/update-garm-image-pin\\.mjs$"]'
|
||||
|
||||
@@ -15,7 +15,7 @@ spec:
|
||||
- name: renovate
|
||||
# Update this to the latest available and then enable Renovate on
|
||||
# the manifest
|
||||
image: renovate/renovate:43.96.0-full
|
||||
image: renovate/renovate:43.95.0-full
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: renovate-gitea-token
|
||||
|
||||
@@ -2,10 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- pvc.yaml
|
||||
- configmap.yaml
|
||||
- service.yaml
|
||||
- ingress.yaml
|
||||
- rbac.yaml
|
||||
- postgres-volume.yaml
|
||||
- postgres-cluster.yaml
|
||||
- release.yaml
|
||||
- secret.yaml
|
||||
- deployment.yaml
|
||||
5
apps/woodpecker/namespace.yaml
Normal file
5
apps/woodpecker/namespace.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: woodpecker
|
||||
23
apps/woodpecker/postgres-cluster.yaml
Normal file
23
apps/woodpecker/postgres-cluster.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
apiVersion: postgresql.cnpg.io/v1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: woodpecker-postgresql-cluster
|
||||
namespace: woodpecker
|
||||
spec:
|
||||
instances: 1
|
||||
|
||||
imageName: ghcr.io/cloudnative-pg/postgresql:17.4
|
||||
|
||||
bootstrap:
|
||||
initdb:
|
||||
database: woodpecker
|
||||
owner: woodpecker
|
||||
|
||||
storage:
|
||||
pvcTemplate:
|
||||
storageClassName: ssd-lvmpv
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
volumeName: woodpecker-postgresql-cluster-lvmssd
|
||||
@@ -1,46 +1,33 @@
|
||||
---
|
||||
apiVersion: local.openebs.io/v1alpha1
|
||||
kind: LVMVolume
|
||||
metadata:
|
||||
labels:
|
||||
kubernetes.io/nodename: anapistula-delrosalae
|
||||
name: garm-lvmhdd
|
||||
name: woodpecker-postgresql-cluster-lvmssd
|
||||
namespace: openebs
|
||||
spec:
|
||||
capacity: 5Gi
|
||||
capacity: 10Gi
|
||||
ownerNodeID: anapistula-delrosalae
|
||||
shared: "yes"
|
||||
thinProvision: "no"
|
||||
vgPattern: ^openebs-hdd$
|
||||
volGroup: openebs-hdd
|
||||
vgPattern: ^openebs-ssd$
|
||||
volGroup: openebs-ssd
|
||||
---
|
||||
kind: PersistentVolume
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: garm-lvmhdd
|
||||
name: woodpecker-postgresql-cluster-lvmssd
|
||||
spec:
|
||||
capacity:
|
||||
storage: 5Gi
|
||||
storage: 10Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: hdd-lvmpv
|
||||
storageClassName: ssd-lvmpv
|
||||
volumeMode: Filesystem
|
||||
csi:
|
||||
driver: local.csi.openebs.io
|
||||
fsType: btrfs
|
||||
volumeHandle: garm-lvmhdd
|
||||
volumeHandle: woodpecker-postgresql-cluster-lvmssd
|
||||
---
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: garm-lvmhdd
|
||||
namespace: garm
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
||||
storageClassName: hdd-lvmpv
|
||||
volumeName: garm-lvmhdd
|
||||
# PVC is dynamically created by the Postgres operator
|
||||
115
apps/woodpecker/release.yaml
Normal file
115
apps/woodpecker/release.yaml
Normal file
@@ -0,0 +1,115 @@
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: woodpecker
|
||||
namespace: woodpecker
|
||||
spec:
|
||||
interval: 24h
|
||||
url: https://woodpecker-ci.org/
|
||||
---
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: woodpecker
|
||||
namespace: woodpecker
|
||||
spec:
|
||||
interval: 30m
|
||||
chart:
|
||||
spec:
|
||||
chart: woodpecker
|
||||
version: 3.5.1
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: woodpecker
|
||||
namespace: woodpecker
|
||||
interval: 12h
|
||||
values:
|
||||
server:
|
||||
enabled: true
|
||||
statefulSet:
|
||||
replicaCount: 1
|
||||
|
||||
persistentVolume:
|
||||
enabled: false # Using Postgresql database
|
||||
|
||||
env:
|
||||
WOODPECKER_HOST: "https://woodpecker.lumpiasty.xyz"
|
||||
# Gitea integration
|
||||
WOODPECKER_GITEA: "true"
|
||||
WOODPECKER_GITEA_URL: "https://gitea.lumpiasty.xyz"
|
||||
# PostgreSQL database configuration
|
||||
WOODPECKER_DATABASE_DRIVER: postgres
|
||||
# Password is loaded from woodpecker-postgresql-cluster-app secret (created by CNPG)
|
||||
WOODPECKER_DATABASE_DATASOURCE:
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: woodpecker-postgresql-cluster-app
|
||||
key: fqdn-uri
|
||||
# Allow logging in from all accounts on Gitea
|
||||
WOODPECKER_OPEN: "true"
|
||||
# Make lumpiasty admin
|
||||
WOODPECKER_ADMIN: GiteaAdmin
|
||||
|
||||
createAgentSecret: true
|
||||
|
||||
extraSecretNamesForEnvFrom:
|
||||
- woodpecker-secrets
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
ingressClassName: nginx-ingress
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: letsencrypt
|
||||
acme.cert-manager.io/http01-edit-in-place: "true"
|
||||
hosts:
|
||||
- host: woodpecker.lumpiasty.xyz
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: woodpecker-server
|
||||
servicePort: 80
|
||||
tls:
|
||||
- hosts:
|
||||
- woodpecker.lumpiasty.xyz
|
||||
secretName: woodpecker-ingress
|
||||
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
agent:
|
||||
enabled: true
|
||||
replicaCount: 2
|
||||
|
||||
env:
|
||||
WOODPECKER_SERVER: "woodpecker-server:9000"
|
||||
WOODPECKER_BACKEND: kubernetes
|
||||
WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
|
||||
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: ssd-lvmpv
|
||||
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
|
||||
WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
|
||||
WOODPECKER_CONNECT_RETRY_COUNT: "5"
|
||||
|
||||
mapAgentSecret: true
|
||||
|
||||
extraSecretNamesForEnvFrom:
|
||||
- woodpecker-secrets
|
||||
|
||||
persistence:
|
||||
enabled: false
|
||||
|
||||
serviceAccount:
|
||||
create: true
|
||||
rbac:
|
||||
create: true
|
||||
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
62
apps/woodpecker/secret.yaml
Normal file
62
apps/woodpecker/secret.yaml
Normal file
@@ -0,0 +1,62 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: woodpecker-secret
|
||||
namespace: woodpecker
|
||||
---
|
||||
apiVersion: secrets.hashicorp.com/v1beta1
|
||||
kind: VaultAuth
|
||||
metadata:
|
||||
name: woodpecker
|
||||
namespace: woodpecker
|
||||
spec:
|
||||
method: kubernetes
|
||||
mount: kubernetes
|
||||
kubernetes:
|
||||
role: woodpecker
|
||||
serviceAccount: woodpecker-secret
|
||||
---
|
||||
# Main woodpecker secrets from Vault
|
||||
# Requires vault kv put secret/woodpecker \
|
||||
# WOODPECKER_AGENT_SECRET="$(openssl rand -hex 32)" \
|
||||
# WOODPECKER_GITEA_CLIENT="<gitea-oauth-client>" \
|
||||
# WOODPECKER_GITEA_SECRET="<gitea-oauth-secret>"
|
||||
# Note: Database password comes from CNPG secret (woodpecker-postgresql-cluster-app)
|
||||
apiVersion: secrets.hashicorp.com/v1beta1
|
||||
kind: VaultStaticSecret
|
||||
metadata:
|
||||
name: woodpecker-secrets
|
||||
namespace: woodpecker
|
||||
spec:
|
||||
type: kv-v2
|
||||
mount: secret
|
||||
path: woodpecker
|
||||
destination:
|
||||
create: true
|
||||
name: woodpecker-secrets
|
||||
type: Opaque
|
||||
transformation:
|
||||
excludeRaw: true
|
||||
vaultAuthRef: woodpecker
|
||||
---
|
||||
# Container registry credentials for Kaniko
|
||||
# Requires vault kv put secret/container-registry \
|
||||
# REGISTRY_USERNAME="<username>" \
|
||||
# REGISTRY_PASSWORD="<token>"
|
||||
apiVersion: secrets.hashicorp.com/v1beta1
|
||||
kind: VaultStaticSecret
|
||||
metadata:
|
||||
name: container-registry
|
||||
namespace: woodpecker
|
||||
spec:
|
||||
type: kv-v2
|
||||
mount: secret
|
||||
path: container-registry
|
||||
destination:
|
||||
create: true
|
||||
name: container-registry
|
||||
type: Opaque
|
||||
transformation:
|
||||
excludeRaw: true
|
||||
vaultAuthRef: woodpecker
|
||||
18
devenv.lock
18
devenv.lock
@@ -3,11 +3,11 @@
|
||||
"devenv": {
|
||||
"locked": {
|
||||
"dir": "src/modules",
|
||||
"lastModified": 1773504385,
|
||||
"narHash": "sha256-ANaeR+xVHxjGz36VI4qlZUbdhrlSE0xU7O7AUJKw3zU=",
|
||||
"lastModified": 1775201809,
|
||||
"narHash": "sha256-WmpoCegCQ6Q2ZyxqO05zlz/7XXjt/l2iut4Nk5Nt+W4=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"rev": "4bce49e6f60c69e99eeb643efbbf74125cefd329",
|
||||
"rev": "42a5505d4700e791732e48a38b4cca05a755f94b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -45,11 +45,11 @@
|
||||
"treefmt-nix": "treefmt-nix"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1773451905,
|
||||
"narHash": "sha256-S/bukFEwbOYQbnR5UpciwYA42aEt1w5LK73GwARhsaA=",
|
||||
"lastModified": 1775175041,
|
||||
"narHash": "sha256-lYCPSMIV26VazREzl/TIpbWhBXJ+vJ0EJ+308TrX/6w=",
|
||||
"owner": "a1994sc",
|
||||
"repo": "krew2nix",
|
||||
"rev": "bc779a8cf59ebf76ae60556bfe2d781a0a4cdbd9",
|
||||
"rev": "15c594042f1ba80ce97ab190a9c684a44c613590",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -60,11 +60,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1773389992,
|
||||
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
|
||||
"lastModified": 1775036866,
|
||||
"narHash": "sha256-ZojAnPuCdy657PbTq5V0Y+AHKhZAIwSIT2cb8UgAz/U=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c06b4ae3d6599a672a6210b7021d699c351eebda",
|
||||
"rev": "6201e203d09599479a3b3450ed24fa81537ebc4e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
15
devenv.nix
15
devenv.nix
@@ -6,8 +6,6 @@ let
|
||||
hvac
|
||||
librouteros
|
||||
]);
|
||||
|
||||
garm-cli = pkgs.callPackage ./nix/garm-cli.nix { };
|
||||
in
|
||||
{
|
||||
# Overlays - apply krew2nix to get kubectl with krew support
|
||||
@@ -43,9 +41,18 @@ in
|
||||
openbao
|
||||
pv-migrate
|
||||
mermaid-cli
|
||||
opencode
|
||||
garm-cli
|
||||
(
|
||||
# Wrapping opencode to set the OPENCODE_ENABLE_EXA environment variable
|
||||
runCommand "opencode" {
|
||||
buildInputs = [ makeWrapper ];
|
||||
} ''
|
||||
mkdir -p $out/bin
|
||||
makeWrapper ${pkgs.opencode}/bin/opencode $out/bin/opencode \
|
||||
--set OPENCODE_ENABLE_EXA "1"
|
||||
''
|
||||
)
|
||||
tea
|
||||
woodpecker-cli
|
||||
];
|
||||
|
||||
# Scripts
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
FROM golang:1.26-alpine AS build
|
||||
|
||||
ARG GARM_COMMIT
|
||||
ARG GARM_PROVIDER_K8S_VERSION=0.3.2
|
||||
|
||||
RUN apk add --no-cache ca-certificates git wget tar build-base util-linux-dev linux-headers
|
||||
|
||||
WORKDIR /src
|
||||
RUN git clone https://github.com/cloudbase/garm.git . && git checkout "${GARM_COMMIT}"
|
||||
|
||||
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 \
|
||||
go build -trimpath \
|
||||
-tags osusergo,netgo,sqlite_omit_load_extension \
|
||||
-ldflags="-linkmode external -extldflags '-static' -s -w" \
|
||||
-o /out/garm ./cmd/garm
|
||||
|
||||
RUN mkdir -p /out/providers.d \
|
||||
&& wget -qO /tmp/garm-provider-k8s.tar.gz "https://github.com/mercedes-benz/garm-provider-k8s/releases/download/v${GARM_PROVIDER_K8S_VERSION}/garm-provider-k8s_Linux_x86_64.tar.gz" \
|
||||
&& tar -xzf /tmp/garm-provider-k8s.tar.gz -C /out/providers.d \
|
||||
&& chmod 0755 /out/providers.d/garm-provider-k8s
|
||||
|
||||
FROM busybox
|
||||
|
||||
COPY --from=build /out/garm /bin/garm
|
||||
COPY --from=build /out/providers.d/garm-provider-k8s /opt/garm/providers.d/garm-provider-k8s
|
||||
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
|
||||
ENTRYPOINT ["/bin/garm"]
|
||||
10
docs/assets/woodpecker.svg
Normal file
10
docs/assets/woodpecker.svg
Normal file
@@ -0,0 +1,10 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="284.538" height="253.96">
|
||||
<style>
|
||||
@media (prefers-color-scheme: dark) {
|
||||
path {
|
||||
fill: white;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
<path d="M162.51 33.188c-26.77.411-54.004 6.885-71.494 3.745-1.313-.232-2.124 1.338-1.171 2.265 14.749 14.003 20.335 28.16 36.718 30.065l.476.103c-7.567 7.799-14.028 18.018-18.571 31.171-4.89 14.106-6.268 29.421-7.89 47.105-2.445 26.332-5.173 56.152-20.038 93.54a246.489 246.489 0 0 0-13.27 45.946h22.652a221.202 221.202 0 0 1 11.249-37.786c16.049-40.374 19.073-73.257 21.505-99.693 1.493-16.255 2.806-30.309 6.796-41.853 11.647-33.527 39.408-40.889 61.056-36.693 21.004 4.067 41.673 20.502 40.592 44.016-.772 15.985-7.76 23.166-12.87 28.43-2.793 2.883-5.47 5.611-6.731 9.498-3.037 9.19.101 19.434 8.494 27.568 22.24 20.734 34.338 59.717 33.681 106.513h22.176c.592-52.935-13.951-97.839-40.503-122.626-2.097-2.021-2.69-3.604-3.191-3.347 1.222-1.544 3.217-3.346 4.633-4.813 29.382-21.79 77.813-1.892 107.054 9.653 7.58 2.985 11.274-4.338 4.067-8.623-25.097-14.84-76.54-54.016-105.368-79.718-4.029-3.54-6.796-7.8-11.455-11.738-15.547-27.439-41.84-33.127-68.597-32.728Zm35.238 60.27a15.161 15.161 0 0 0-2.008.232 15.161 15.161 0 0 0-1.506 29.434 15.154 15.154 0 0 0 9.473-28.79 15.161 15.161 0 0 0-5.959-.876zm-44.286 147.17a2.033 2.033 0 0 0-1.133.374c-1.08.772-1.93 3.05-.772 5.701 5.38 12.394 9.1 25.445 12.536 40.413h22.484c-5.676-16.629-16.307-34.055-27.851-43.978-2.008-1.737-3.913-2.574-5.251-2.51z" style="stroke-width:12.8704" transform="translate(-67.27 -33.169)"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.5 KiB |
32
infra/configs/openbao-k8s-se-role.yaml
Normal file
32
infra/configs/openbao-k8s-se-role.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
# Roles with needed access for OpenBao's Kubernetes secret engine
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: k8s-full-secrets-abilities
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["serviceaccounts", "serviceaccounts/token"]
|
||||
verbs: ["create", "update", "delete"]
|
||||
- apiGroups: ["rbac.authorization.k8s.io"]
|
||||
resources: ["rolebindings", "clusterrolebindings"]
|
||||
verbs: ["create", "update", "delete"]
|
||||
- apiGroups: ["rbac.authorization.k8s.io"]
|
||||
resources: ["roles", "clusterroles"]
|
||||
verbs: ["bind", "escalate", "create", "update", "delete"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: openbao-token-creator-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: k8s-full-secrets-abilities
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: openbao
|
||||
namespace: openbao
|
||||
@@ -18,7 +18,7 @@ spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: cert-manager-webhook-ovh
|
||||
version: 0.9.5
|
||||
version: 0.9.6
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: cert-manager-webhook-ovh
|
||||
|
||||
@@ -23,7 +23,7 @@ spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: cert-manager
|
||||
version: v1.20.0
|
||||
version: v1.20.1
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: cert-manager
|
||||
|
||||
@@ -25,3 +25,4 @@ resources:
|
||||
|
||||
- configs/openbao-volume.yaml
|
||||
- controllers/openbao.yaml
|
||||
- configs/openbao-k8s-se-role.yaml
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
{ lib, buildGoModule, fetchFromGitHub, installShellFiles }:
|
||||
|
||||
buildGoModule rec {
|
||||
pname = "garm-cli";
|
||||
version = "r1380";
|
||||
garmCommit = "818a9dddccba5f2843f185e6a846770988f31fc5";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "cloudbase";
|
||||
repo = "garm";
|
||||
rev = garmCommit;
|
||||
hash = "sha256-CTqqabNYUMSrmnQVCWml1/vkDw+OP1uJo1KFhBSZpYY=";
|
||||
};
|
||||
|
||||
subPackages = [ "cmd/garm-cli" ];
|
||||
|
||||
nativeBuildInputs = [ installShellFiles ];
|
||||
|
||||
vendorHash = null;
|
||||
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X main.version=${version}"
|
||||
];
|
||||
|
||||
postInstall = ''
|
||||
# We need to set a temporary HOME for the completion scripts as workaround
|
||||
# because garm-cli tries to write config to the home directory
|
||||
# when generating the completion scripts
|
||||
export HOME="$(mktemp -d)"
|
||||
|
||||
installShellCompletion --cmd garm-cli \
|
||||
--bash <($out/bin/garm-cli completion bash) \
|
||||
--fish <($out/bin/garm-cli completion fish) \
|
||||
--zsh <($out/bin/garm-cli completion zsh)
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "CLI for GitHub Actions Runner Manager";
|
||||
homepage = "https://github.com/cloudbase/garm";
|
||||
license = lib.licenses.asl20;
|
||||
mainProgram = "garm-cli";
|
||||
};
|
||||
}
|
||||
@@ -10,57 +10,8 @@
|
||||
"gotk-components\\.ya?ml$"
|
||||
]
|
||||
},
|
||||
"customManagers": [
|
||||
{
|
||||
"customType": "regex",
|
||||
"description": "Track garm-cli pinned main commit",
|
||||
"managerFilePatterns": ["^nix/garm-cli\\.nix$"],
|
||||
"matchStrings": ["garmCommit = \\\"(?<currentValue>[a-f0-9]{40})\\\";"],
|
||||
"depNameTemplate": "cloudbase/garm",
|
||||
"datasourceTemplate": "github-refs",
|
||||
"versioningTemplate": "git"
|
||||
},
|
||||
{
|
||||
"customType": "regex",
|
||||
"description": "Track garm-provider-k8s release in garm image Dockerfile",
|
||||
"managerFilePatterns": ["^docker/garm/Dockerfile$"],
|
||||
"matchStrings": ["ARG GARM_PROVIDER_K8S_VERSION=(?<currentValue>[0-9]+\\.[0-9]+\\.[0-9]+)"],
|
||||
"depNameTemplate": "mercedes-benz/garm-provider-k8s",
|
||||
"datasourceTemplate": "github-releases",
|
||||
"versioningTemplate": "semver"
|
||||
},
|
||||
{
|
||||
"customType": "regex",
|
||||
"description": "Track pinned garm main commit",
|
||||
"managerFilePatterns": ["^apps/garm/image-source\\.env$"],
|
||||
"matchStrings": ["GARM_COMMIT=(?<currentValue>[a-f0-9]{40})"],
|
||||
"depNameTemplate": "cloudbase/garm",
|
||||
"datasourceTemplate": "github-refs",
|
||||
"versioningTemplate": "git"
|
||||
}
|
||||
],
|
||||
"prHourlyLimit": 9,
|
||||
"packageRules": [
|
||||
{
|
||||
"matchManagers": ["custom.regex"],
|
||||
"matchDepNames": ["cloudbase/garm"],
|
||||
"matchFileNames": ["nix/garm-cli.nix"],
|
||||
"postUpgradeTasks": {
|
||||
"commands": ["node utils/update-garm-cli-hash.mjs"],
|
||||
"fileFilters": ["nix/garm-cli.nix"],
|
||||
"executionMode": "update"
|
||||
}
|
||||
},
|
||||
{
|
||||
"matchManagers": ["custom.regex"],
|
||||
"matchDepNames": ["cloudbase/garm"],
|
||||
"matchFileNames": ["apps/garm/image-source.env"],
|
||||
"postUpgradeTasks": {
|
||||
"commands": ["node utils/update-garm-image-pin.mjs"],
|
||||
"fileFilters": ["apps/garm/image-source.env", "apps/garm/deployment.yaml"],
|
||||
"executionMode": "update"
|
||||
}
|
||||
},
|
||||
{
|
||||
"matchDatasources": ["docker"],
|
||||
"matchPackageNames": ["ghcr.io/mostlygeek/llama-swap"],
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import pathlib
|
||||
from typing import Any, cast
|
||||
|
||||
import hvac
|
||||
@@ -42,7 +43,7 @@ def synchronize_auth_kubernetes_config(client: hvac.Client):
|
||||
def synchronize_kubernetes_roles(client: hvac.Client):
|
||||
kubernetes = Kubernetes(client.adapter)
|
||||
|
||||
policy_dir = os.path.join(os.path.dirname(__file__), '../vault/kubernetes-roles/')
|
||||
policy_dir = os.path.join(os.path.dirname(__file__), '../vault/kubernetes-auth-roles/')
|
||||
|
||||
roles: dict[str, Any] = {} # pyright:ignore[reportExplicitAny]
|
||||
for filename in os.listdir(policy_dir):
|
||||
@@ -67,6 +68,69 @@ def synchronize_kubernetes_roles(client: hvac.Client):
|
||||
# Using write data instead of kubernetes.create_role, we can pass raw yaml
|
||||
_ = client.write_data(f'/auth/kubernetes/role/{role_name}', data=role_content) # pyright:ignore[reportAny]
|
||||
|
||||
def synchronize_approle_auth(client: hvac.Client):
|
||||
if client.sys.list_auth_methods().get('approle/') is None:
|
||||
print('Enabling AppRole auth method')
|
||||
client.sys.enable_auth_method('approle', 'AppRole authorization for CI')
|
||||
|
||||
roles_dir = pathlib.Path(__file__).parent.joinpath('../vault/approles/')
|
||||
roles: dict[str, Any] = {}
|
||||
|
||||
for filename in roles_dir.iterdir():
|
||||
with filename.open('r') as f:
|
||||
role = yaml.safe_load(f.read())
|
||||
assert type(role) is dict
|
||||
roles[filename.stem] = role
|
||||
|
||||
roles_on_vault: list[str] = []
|
||||
roles_response = client.list("auth/approle/roles")
|
||||
if roles_response is not None:
|
||||
roles_on_vault = roles_response['data']['keys']
|
||||
|
||||
for role in roles_on_vault:
|
||||
if role not in roles:
|
||||
print(f'Deleting role: {role}')
|
||||
client.delete(f'auth/approle/role/{role}')
|
||||
|
||||
for role_name, role_content in roles.items():
|
||||
print(f'Updating role: {role_name}')
|
||||
client.write_data(f'auth/approle/role/{role_name}', data=role_content)
|
||||
|
||||
def synchronize_kubernetes_secretengine(client: hvac.Client):
|
||||
# Ensure kubernetes secret engine is enabled
|
||||
if client.sys.list_mounted_secrets_engines().get('kubernetes/') is None:
|
||||
print('Enabling kubernetes secret engine')
|
||||
client.sys.enable_secrets_engine('kubernetes', 'kubernetes', 'Cluster access')
|
||||
|
||||
# Write empty config (all defaults, working on the same cluster)
|
||||
client.write('kubernetes/config', None)
|
||||
|
||||
policy_dir = pathlib.Path(__file__).parent.joinpath('../vault/kubernetes-se-roles/')
|
||||
roles: dict[str, Any] = {}
|
||||
|
||||
for filename in policy_dir.iterdir():
|
||||
with filename.open('r') as f:
|
||||
role = yaml.safe_load(f.read())
|
||||
assert type(role) is dict
|
||||
# generated_role_rules must be json or yaml formatted string, convert it
|
||||
if 'generated_role_rules' in role and type(role['generated_role_rules']) is not str:
|
||||
role['generated_role_rules'] = yaml.safe_dump(role['generated_role_rules'])
|
||||
roles[filename.stem] = role
|
||||
|
||||
roles_on_vault: list[str] = []
|
||||
roles_response = client.list("kubernetes/roles")
|
||||
if roles_response is not None:
|
||||
roles_on_vault = roles_response['data']['keys']
|
||||
|
||||
for role in roles_on_vault:
|
||||
if role not in roles:
|
||||
print(f'Deleting role: {role}')
|
||||
client.delete(f'kubernetes/roles/{role}')
|
||||
|
||||
for role_name, role_content in roles.items():
|
||||
print(f'Updating role: {role_name}')
|
||||
client.write_data(f'kubernetes/roles/{role_name}', data=role_content)
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser(
|
||||
prog="synchronizeVault",
|
||||
@@ -82,5 +146,11 @@ if __name__ == '__main__':
|
||||
print('Synchronizing kubernetes config')
|
||||
synchronize_auth_kubernetes_config(client)
|
||||
|
||||
print('Synchronizing kubernetes roles')
|
||||
print('Synchronizing kubernetes auth roles')
|
||||
synchronize_kubernetes_roles(client)
|
||||
|
||||
print('Synchronizing AppRole auth method')
|
||||
synchronize_approle_auth(client)
|
||||
|
||||
print('Synchronizing kubernetes secret engine')
|
||||
synchronize_kubernetes_secretengine(client)
|
||||
|
||||
@@ -1,320 +0,0 @@
|
||||
import { createHash } from "node:crypto";
|
||||
import { Buffer } from "node:buffer";
|
||||
import fs from "node:fs";
|
||||
import https from "node:https";
|
||||
import zlib from "node:zlib";
|
||||
|
||||
const nixFile = "nix/garm-cli.nix";
|
||||
|
||||
function die(message) {
|
||||
console.error(message);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
function readText(filePath) {
|
||||
try {
|
||||
return fs.readFileSync(filePath, "utf8");
|
||||
} catch {
|
||||
die(`Missing ${filePath}`);
|
||||
}
|
||||
}
|
||||
|
||||
function extractVersion(text) {
|
||||
const match = text.match(/^\s*version\s*=\s*"([^"]+)";/m);
|
||||
if (!match) {
|
||||
die(`Unable to extract version from ${nixFile}`);
|
||||
}
|
||||
return match[1];
|
||||
}
|
||||
|
||||
function extractCommit(text) {
|
||||
const match = text.match(/^\s*garmCommit\s*=\s*"([a-f0-9]{40})";/m);
|
||||
return match ? match[1] : null;
|
||||
}
|
||||
|
||||
function writeU64LE(hash, value) {
|
||||
const buf = Buffer.alloc(8);
|
||||
buf.writeBigUInt64LE(BigInt(value), 0);
|
||||
hash.update(buf);
|
||||
}
|
||||
|
||||
function writeNarString(hash, data) {
|
||||
writeU64LE(hash, data.length);
|
||||
hash.update(data);
|
||||
const pad = (8 - (data.length % 8)) % 8;
|
||||
if (pad) {
|
||||
hash.update(Buffer.alloc(pad));
|
||||
}
|
||||
}
|
||||
|
||||
function writeNarText(hash, text) {
|
||||
writeNarString(hash, Buffer.from(text, "utf8"));
|
||||
}
|
||||
|
||||
function parseOctal(field) {
|
||||
const clean = field.toString("ascii").replace(/\0.*$/, "").trim();
|
||||
if (!clean) {
|
||||
return 0;
|
||||
}
|
||||
return Number.parseInt(clean, 8);
|
||||
}
|
||||
|
||||
function parseTarHeader(block) {
|
||||
const name = block.subarray(0, 100).toString("utf8").replace(/\0.*$/, "");
|
||||
const mode = parseOctal(block.subarray(100, 108));
|
||||
const size = parseOctal(block.subarray(124, 136));
|
||||
const typeflagRaw = block[156];
|
||||
const typeflag = typeflagRaw === 0 ? "0" : String.fromCharCode(typeflagRaw);
|
||||
const linkname = block.subarray(157, 257).toString("utf8").replace(/\0.*$/, "");
|
||||
const prefix = block.subarray(345, 500).toString("utf8").replace(/\0.*$/, "");
|
||||
return {
|
||||
name: prefix ? `${prefix}/${name}` : name,
|
||||
mode,
|
||||
size,
|
||||
typeflag,
|
||||
linkname,
|
||||
};
|
||||
}
|
||||
|
||||
function parsePax(data) {
|
||||
const out = {};
|
||||
let i = 0;
|
||||
while (i < data.length) {
|
||||
let sp = i;
|
||||
while (sp < data.length && data[sp] !== 0x20) sp += 1;
|
||||
if (sp >= data.length) break;
|
||||
const len = Number.parseInt(data.subarray(i, sp).toString("utf8"), 10);
|
||||
if (!Number.isFinite(len) || len <= 0) break;
|
||||
const record = data.subarray(sp + 1, i + len).toString("utf8");
|
||||
const eq = record.indexOf("=");
|
||||
if (eq > 0) {
|
||||
const key = record.slice(0, eq);
|
||||
const value = record.slice(eq + 1).replace(/\n$/, "");
|
||||
out[key] = value;
|
||||
}
|
||||
i += len;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
function parseTarEntries(archiveBuffer) {
|
||||
const gz = zlib.gunzipSync(archiveBuffer);
|
||||
const entries = [];
|
||||
let i = 0;
|
||||
let pendingPax = null;
|
||||
let longName = null;
|
||||
let longLink = null;
|
||||
|
||||
while (i + 512 <= gz.length) {
|
||||
const header = gz.subarray(i, i + 512);
|
||||
i += 512;
|
||||
|
||||
if (header.every((b) => b === 0)) {
|
||||
break;
|
||||
}
|
||||
|
||||
const h = parseTarHeader(header);
|
||||
const data = gz.subarray(i, i + h.size);
|
||||
const dataPad = (512 - (h.size % 512)) % 512;
|
||||
i += h.size + dataPad;
|
||||
|
||||
if (h.typeflag === "x") {
|
||||
pendingPax = parsePax(data);
|
||||
continue;
|
||||
}
|
||||
if (h.typeflag === "g") {
|
||||
continue;
|
||||
}
|
||||
if (h.typeflag === "L") {
|
||||
longName = data.toString("utf8").replace(/\0.*$/, "");
|
||||
continue;
|
||||
}
|
||||
if (h.typeflag === "K") {
|
||||
longLink = data.toString("utf8").replace(/\0.*$/, "");
|
||||
continue;
|
||||
}
|
||||
|
||||
const path = pendingPax?.path ?? longName ?? h.name;
|
||||
const linkpath = pendingPax?.linkpath ?? longLink ?? h.linkname;
|
||||
|
||||
entries.push({
|
||||
path,
|
||||
typeflag: h.typeflag,
|
||||
mode: h.mode,
|
||||
linkname: linkpath,
|
||||
data,
|
||||
});
|
||||
|
||||
pendingPax = null;
|
||||
longName = null;
|
||||
longLink = null;
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
function stripTopDir(path) {
|
||||
const cleaned = path.replace(/^\.?\//, "").replace(/\/$/, "");
|
||||
const idx = cleaned.indexOf("/");
|
||||
if (idx === -1) return "";
|
||||
return cleaned.slice(idx + 1);
|
||||
}
|
||||
|
||||
function ensureDir(root, relPath) {
|
||||
if (!relPath) return root;
|
||||
const parts = relPath.split("/").filter(Boolean);
|
||||
let cur = root;
|
||||
for (const part of parts) {
|
||||
let child = cur.children.get(part);
|
||||
if (!child) {
|
||||
child = { kind: "directory", children: new Map() };
|
||||
cur.children.set(part, child);
|
||||
}
|
||||
if (child.kind !== "directory") {
|
||||
die(`Path conflict while building tree at ${relPath}`);
|
||||
}
|
||||
cur = child;
|
||||
}
|
||||
return cur;
|
||||
}
|
||||
|
||||
function buildTree(entries) {
|
||||
const root = { kind: "directory", children: new Map() };
|
||||
for (const entry of entries) {
|
||||
const rel = stripTopDir(entry.path);
|
||||
if (!rel) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const parts = rel.split("/").filter(Boolean);
|
||||
const name = parts.pop();
|
||||
const parent = ensureDir(root, parts.join("/"));
|
||||
|
||||
if (entry.typeflag === "5") {
|
||||
const existing = parent.children.get(name);
|
||||
if (!existing) {
|
||||
parent.children.set(name, { kind: "directory", children: new Map() });
|
||||
} else if (existing.kind !== "directory") {
|
||||
die(`Path conflict at ${rel}`);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (entry.typeflag === "2") {
|
||||
parent.children.set(name, { kind: "symlink", target: entry.linkname });
|
||||
continue;
|
||||
}
|
||||
|
||||
if (entry.typeflag === "0") {
|
||||
parent.children.set(name, {
|
||||
kind: "regular",
|
||||
executable: (entry.mode & 0o111) !== 0,
|
||||
contents: Buffer.from(entry.data),
|
||||
});
|
||||
continue;
|
||||
}
|
||||
}
|
||||
return root;
|
||||
}
|
||||
|
||||
function compareUtf8(a, b) {
|
||||
return Buffer.from(a, "utf8").compare(Buffer.from(b, "utf8"));
|
||||
}
|
||||
|
||||
function narDump(hash, node) {
|
||||
if (node.kind === "directory") {
|
||||
writeNarText(hash, "(");
|
||||
writeNarText(hash, "type");
|
||||
writeNarText(hash, "directory");
|
||||
const names = [...node.children.keys()].sort(compareUtf8);
|
||||
for (const name of names) {
|
||||
writeNarText(hash, "entry");
|
||||
writeNarText(hash, "(");
|
||||
writeNarText(hash, "name");
|
||||
writeNarString(hash, Buffer.from(name, "utf8"));
|
||||
writeNarText(hash, "node");
|
||||
narDump(hash, node.children.get(name));
|
||||
writeNarText(hash, ")");
|
||||
}
|
||||
writeNarText(hash, ")");
|
||||
return;
|
||||
}
|
||||
|
||||
if (node.kind === "symlink") {
|
||||
writeNarText(hash, "(");
|
||||
writeNarText(hash, "type");
|
||||
writeNarText(hash, "symlink");
|
||||
writeNarText(hash, "target");
|
||||
writeNarString(hash, Buffer.from(node.target, "utf8"));
|
||||
writeNarText(hash, ")");
|
||||
return;
|
||||
}
|
||||
|
||||
writeNarText(hash, "(");
|
||||
writeNarText(hash, "type");
|
||||
writeNarText(hash, "regular");
|
||||
if (node.executable) {
|
||||
writeNarText(hash, "executable");
|
||||
writeNarText(hash, "");
|
||||
}
|
||||
writeNarText(hash, "contents");
|
||||
writeNarString(hash, node.contents);
|
||||
writeNarText(hash, ")");
|
||||
}
|
||||
|
||||
function fetchBuffer(url) {
|
||||
return new Promise((resolve, reject) => {
|
||||
https
|
||||
.get(url, (res) => {
|
||||
if (res.statusCode && res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) {
|
||||
const redirectUrl = new URL(res.headers.location, url).toString();
|
||||
res.resume();
|
||||
fetchBuffer(redirectUrl).then(resolve, reject);
|
||||
return;
|
||||
}
|
||||
if (!res.statusCode || res.statusCode < 200 || res.statusCode >= 300) {
|
||||
reject(new Error(`Failed to fetch ${url}: ${res.statusCode ?? "unknown"}`));
|
||||
res.resume();
|
||||
return;
|
||||
}
|
||||
const chunks = [];
|
||||
res.on("data", (chunk) => chunks.push(chunk));
|
||||
res.on("end", () => resolve(Buffer.concat(chunks)));
|
||||
})
|
||||
.on("error", reject);
|
||||
});
|
||||
}
|
||||
|
||||
function computeSRIFromGitHubTar(ref) {
|
||||
const url = `https://github.com/cloudbase/garm/archive/${ref}.tar.gz`;
|
||||
return fetchBuffer(url).then((archive) => {
|
||||
const entries = parseTarEntries(archive);
|
||||
const root = buildTree(entries);
|
||||
const hash = createHash("sha256");
|
||||
writeNarText(hash, "nix-archive-1");
|
||||
narDump(hash, root);
|
||||
return `sha256-${hash.digest("base64")}`;
|
||||
});
|
||||
}
|
||||
|
||||
function updateHash(text, sri) {
|
||||
const pattern = /(^\s*hash\s*=\s*")sha256-[^"]+(";)/m;
|
||||
if (!pattern.test(text)) {
|
||||
die(`Unable to update hash in ${nixFile}`);
|
||||
}
|
||||
const next = text.replace(pattern, `$1${sri}$2`);
|
||||
return next;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const text = readText(nixFile);
|
||||
const version = extractVersion(text);
|
||||
const commit = extractCommit(text);
|
||||
const ref = commit ?? `v${version}`;
|
||||
const sri = await computeSRIFromGitHubTar(ref);
|
||||
const updated = updateHash(text, sri);
|
||||
fs.writeFileSync(nixFile, updated, "utf8");
|
||||
console.log(`Updated ${nixFile} hash to ${sri}`);
|
||||
}
|
||||
|
||||
main().catch((err) => die(err.message));
|
||||
@@ -1,91 +0,0 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { execFileSync } from "node:child_process";
|
||||
|
||||
const pinFile = "apps/garm/image-source.env";
|
||||
const deploymentFile = "apps/garm/deployment.yaml";
|
||||
|
||||
function fail(message) {
|
||||
console.error(message);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
function parseEnvFile(content) {
|
||||
const vars = {};
|
||||
for (const line of content.split(/\r?\n/)) {
|
||||
if (!line || line.startsWith("#")) {
|
||||
continue;
|
||||
}
|
||||
const idx = line.indexOf("=");
|
||||
if (idx === -1) {
|
||||
continue;
|
||||
}
|
||||
const key = line.slice(0, idx).trim();
|
||||
const value = line.slice(idx + 1).trim();
|
||||
vars[key] = value;
|
||||
}
|
||||
return vars;
|
||||
}
|
||||
|
||||
function updateOrAdd(content, key, value) {
|
||||
const pattern = new RegExp(`^${key}=.*$`, "m");
|
||||
if (pattern.test(content)) {
|
||||
return content.replace(pattern, `${key}=${value}`);
|
||||
}
|
||||
return `${content.trimEnd()}\n${key}=${value}\n`;
|
||||
}
|
||||
|
||||
function gitOut(args, options = {}) {
|
||||
return execFileSync("git", args, {
|
||||
encoding: "utf8",
|
||||
...options,
|
||||
}).trim();
|
||||
}
|
||||
|
||||
function gitRun(args, options = {}) {
|
||||
execFileSync("git", args, options);
|
||||
}
|
||||
|
||||
// Main flow: resolve the pinned garm commit to a monotonically increasing
// revision number, then rewrite the pin file and the deployment manifest
// to reference the matching image tag.
// NOTE(review): `pinFile` and `deploymentFile` are defined earlier in this
// file (outside this chunk) — presumably paths to an env-style pin file and
// a Kubernetes deployment manifest; confirm against the top of the script.
const pinContent = fs.readFileSync(pinFile, "utf8");
const vars = parseEnvFile(pinContent);
const commit = vars.GARM_COMMIT;
// Fallback repo used when the pin file does not override GARM_IMAGE_REPO.
const imageRepo = vars.GARM_IMAGE_REPO || "gitea.lumpiasty.xyz/lumpiasty/garm-k8s";

// Require a full 40-char lowercase SHA-1; anything else aborts the run.
if (!commit || !/^[0-9a-f]{40}$/.test(commit)) {
  fail(`Invalid or missing GARM_COMMIT in ${pinFile}`);
}

// Clone upstream garm into a throwaway dir (blobless to keep it light)
// solely to count commits reachable from the pinned SHA.
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "garm-main-"));
let commitNumber;
try {
  gitRun(["clone", "--filter=blob:none", "https://github.com/cloudbase/garm.git", tmpDir], {
    stdio: "ignore",
  });
  // rev-list --count gives an ordinal for the commit, used as the tag.
  commitNumber = gitOut(["-C", tmpDir, "rev-list", "--count", commit]);
} finally {
  // Always remove the temp clone, even when clone/rev-list failed.
  fs.rmSync(tmpDir, { recursive: true, force: true });
}

// rev-list output must be a plain decimal count.
if (!/^\d+$/.test(commitNumber)) {
  fail(`Unable to resolve commit number for ${commit}`);
}

// Image tag convention: r<commit-count>, e.g. repo:r1234.
const image = `${imageRepo}:r${commitNumber}`;

// Persist the derived values back into the pin file.
let nextPin = pinContent;
nextPin = updateOrAdd(nextPin, "GARM_COMMIT_NUMBER", commitNumber);
nextPin = updateOrAdd(nextPin, "GARM_IMAGE_REPO", imageRepo);
nextPin = updateOrAdd(nextPin, "GARM_IMAGE", image);
fs.writeFileSync(pinFile, nextPin, "utf8");

// Rewrite the image reference in the deployment manifest. The pattern
// accepts either the upstream ghcr.io image or the internal Gitea
// registry image (both Lumpiasty/lumpiasty casings, with or without
// the -k8s suffix).
const deployment = fs.readFileSync(deploymentFile, "utf8");
const imagePattern = /image:\s*(?:ghcr\.io\/cloudbase\/garm:[^\s]+|gitea\.lumpiasty\.xyz\/(?:Lumpiasty|lumpiasty)\/garm(?:-k8s)?:[^\s]+)/;
// Guard first so a non-matching manifest aborts before any write;
// only the first matching occurrence is replaced (non-global regex).
if (!imagePattern.test(deployment)) {
  fail(`Unable to update garm image in ${deploymentFile}`);
}

const updatedDeployment = deployment.replace(imagePattern, `image: ${image}`);

fs.writeFileSync(deploymentFile, updatedDeployment, "utf8");
console.log(`Pinned garm image to ${image}`);
4
vault/approles/ci-flux-reconcile.yaml
Normal file
4
vault/approles/ci-flux-reconcile.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
token_ttl: 20m
|
||||
token_max_ttl: 20m
|
||||
policies:
|
||||
- flux-reconcile
|
||||
@@ -1,6 +1,6 @@
|
||||
bound_service_account_names:
|
||||
- garm
|
||||
- woodpecker-secret
|
||||
bound_service_account_namespaces:
|
||||
- garm
|
||||
- woodpecker
|
||||
token_policies:
|
||||
- garm
|
||||
- woodpecker
|
||||
6
vault/kubernetes-se-roles/flux-reconcile.yaml
Normal file
6
vault/kubernetes-se-roles/flux-reconcile.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
allowed_kubernetes_namespaces: flux-system
|
||||
generated_role_rules:
|
||||
rules:
|
||||
- apiGroups: ["source.toolkit.fluxcd.io"]
|
||||
resources: ["gitrepositories"]
|
||||
verbs: ["get", "patch", "watch"]
|
||||
0
vault/kubernetes-secretengine-config.yaml
Normal file
0
vault/kubernetes-secretengine-config.yaml
Normal file
3
vault/policy/flux-reconcile.hcl
Normal file
3
vault/policy/flux-reconcile.hcl
Normal file
@@ -0,0 +1,3 @@
|
||||
path "kubernetes/creds/flux-reconcile" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
path "secret/data/garm" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
path "secret/data/backblaze" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
7
vault/policy/woodpecker.hcl
Normal file
7
vault/policy/woodpecker.hcl
Normal file
@@ -0,0 +1,7 @@
|
||||
path "secret/data/woodpecker" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
path "secret/data/container-registry" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
Reference in New Issue
Block a user