78 Commits

Author SHA1 Message Date
977722f1b5 chore(deps): update helm release open-webui to v13 2026-04-03 00:00:40 +00:00
dfafadb4e3 add woodpecker to gitea's allowed host list 2026-04-02 23:01:14 +02:00
ae42e342ca add test workflow
All checks were successful
ci/woodpecker/push/my-first-workflow Pipeline was successful
2026-04-02 22:57:48 +02:00
670312d75b add woodpecker ci 2026-04-02 22:35:28 +02:00
0ce1a797fc Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8589' (#191) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-04-02 00:00:33 +00:00
3d53b4b10b chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8589 2026-04-02 00:00:30 +00:00
98f63b1576 Merge pull request 'chore(deps): update helm release immich to v1.2.2' (#190) from renovate/immich-1.x into fresh-start 2026-04-01 00:00:35 +00:00
edba33b552 chore(deps): update helm release immich to v1.2.2 2026-04-01 00:00:32 +00:00
054df42d8b update qwen3.5 4b ctx size to 128k 2026-03-30 21:05:00 +02:00
08db022d0d Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8576' (#189) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-03-30 00:00:52 +00:00
e485a4fc7f chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8576 2026-03-30 00:00:49 +00:00
9e74ed6a19 increase --fit-target to 1.5GB 2026-03-29 23:50:45 +02:00
42e89c9bb7 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8562' (#188) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-03-29 00:00:53 +00:00
99bc04b76a chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8562 2026-03-29 00:00:50 +00:00
7ee77e33d4 Merge pull request 'chore(deps): update helm release cert-manager to v1.20.1' (#186) from renovate/cert-manager-1.x into fresh-start 2026-03-28 00:05:47 +00:00
8bdd5f2196 chore(deps): update helm release cert-manager to v1.20.1 2026-03-28 00:05:44 +00:00
1d8cb85bd4 Merge pull request 'chore(deps): update renovate/renovate docker tag to v43.95.0' (#163) from renovate/renovate-renovate-43.x into fresh-start
Reviewed-on: #163
2026-03-27 17:43:07 +00:00
eeb302b63b Merge pull request 'chore(deps): update helm release immich to v1.2.1' (#175) from renovate/immich-1.x into fresh-start
Reviewed-on: #175
2026-03-27 17:42:59 +00:00
69b437ed3b Merge pull request 'chore(deps): update helm release k8up to v4.9.0' (#182) from renovate/k8up-4.x into fresh-start
Reviewed-on: #182
2026-03-27 17:42:52 +00:00
54674a6e79 Merge pull request 'chore(deps): update helm release open-webui to v12.13.0' (#183) from renovate/open-webui-12.x into fresh-start
Reviewed-on: #183
2026-03-27 17:42:46 +00:00
a9da405326 chore(deps): update renovate/renovate docker tag to v43.95.0 2026-03-27 17:42:10 +00:00
264871bf68 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8547' (#185) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start 2026-03-27 17:42:09 +00:00
6bcd0ba464 chore(deps): update helm release open-webui to v12.13.0 2026-03-27 17:42:07 +00:00
cb53301926 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199-vulkan-b8547 2026-03-27 17:42:04 +00:00
110817b748 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199' (#184) from renovate/ghcr.io-mostlygeek-llama-swap-199.x into fresh-start
Reviewed-on: #184
2026-03-27 17:40:38 +00:00
66cb3c9d82 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v199 2026-03-27 00:00:28 +00:00
42ae7af649 chore(deps): update helm release k8up to v4.9.0 2026-03-26 00:00:57 +00:00
cffcb1cc2d Merge pull request 'chore(deps): update helm release openbao to v0.26.2' (#181) from renovate/openbao-0.x into fresh-start 2026-03-26 00:00:57 +00:00
a4a7dd6fe6 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8508' (#180) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-26 00:00:54 +00:00
52b8ca79dc chore(deps): update helm release openbao to v0.26.2 2026-03-26 00:00:54 +00:00
9a1fe1f740 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8508 2026-03-26 00:00:49 +00:00
e996a60378 Merge pull request 'chore(deps): update helm release cert-manager-webhook-ovh to v0.9.5' (#179) from renovate/cert-manager-webhook-ovh-0.x into fresh-start 2026-03-25 00:00:35 +00:00
0ccd4d93f1 chore(deps): update helm release immich to v1.2.1 2026-03-25 00:00:34 +00:00
d667c6c0fc Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8496' (#178) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-25 00:00:33 +00:00
4254ebc9ef chore(deps): update helm release cert-manager-webhook-ovh to v0.9.5 2026-03-25 00:00:32 +00:00
8cf02fea0e chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8496 2026-03-25 00:00:29 +00:00
aa3c74d6a7 Merge pull request 'chore(deps): update helm release cilium to v1.19.2' (#177) from renovate/cilium-1.x into fresh-start 2026-03-24 00:00:44 +00:00
289089428e Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8477' (#176) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-24 00:00:41 +00:00
a93f6ec36f chore(deps): update helm release cilium to v1.19.2 2026-03-24 00:00:41 +00:00
1d85bf3a88 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8477 2026-03-24 00:00:39 +00:00
f495debf25 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8468' (#174) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-23 00:00:24 +00:00
bfede17c87 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8468 2026-03-23 00:00:21 +00:00
08ca3f4c4e Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8461' (#173) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-22 00:00:27 +00:00
471c0ba62d chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8461 2026-03-22 00:00:23 +00:00
261141f509 Merge pull request 'chore(deps): update helm release k8up to v4.8.7' (#172) from renovate/k8up-4.x into fresh-start 2026-03-20 22:31:45 +00:00
86d5751842 Merge pull request 'chore(deps): update helm release immich to v1.1.3' (#171) from renovate/immich-1.x into fresh-start 2026-03-20 22:31:42 +00:00
43e531a3ca chore(deps): update helm release k8up to v4.8.7 2026-03-20 22:31:41 +00:00
9a0764268b Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8445' (#170) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-20 22:31:39 +00:00
7c88498756 chore(deps): update helm release immich to v1.1.3 2026-03-20 22:31:38 +00:00
8717526358 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8445 2026-03-20 22:31:36 +00:00
b6a7e5092c Merge pull request 'chore(deps): update helm release ingress-nginx to v4.15.1' (#169) from renovate/ingress-nginx-4.x into fresh-start 2026-03-20 00:00:56 +00:00
27f7a5f29a Merge pull request 'chore(deps): update helm release immich to v1.1.2' (#168) from renovate/immich-1.x into fresh-start 2026-03-20 00:00:52 +00:00
9d0fd0981a chore(deps): update helm release ingress-nginx to v4.15.1 2026-03-20 00:00:52 +00:00
51bc53dbbc chore(deps): update helm release immich to v1.1.2 2026-03-20 00:00:50 +00:00
ce0b13ebb3 change kv cache quant to q8_0 2026-03-20 00:57:39 +01:00
516e157d39 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8400' (#167) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-19 00:00:38 +00:00
73d6d1f15a chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8400 2026-03-19 00:00:34 +00:00
c51fc2a5ef Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8390' (#166) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-18 00:00:31 +00:00
8d994e7aa1 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8390 2026-03-18 00:00:28 +00:00
5b551c6c6e switch pullPolicy to Always on crawl4ai-proxy 2026-03-17 01:47:29 +01:00
7e7b3e3d71 add max ctx on llama.cpp 2026-03-17 01:33:35 +01:00
9f315b38e3 use modded crawl4ai proxy image 2026-03-17 01:24:09 +01:00
3e1a806db1 Merge pull request 'chore(deps): update helm release openbao to v0.26.1' (#165) from renovate/openbao-0.x into fresh-start 2026-03-17 00:01:02 +00:00
f7dba45165 Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8369' (#164) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-17 00:01:00 +00:00
c8fac3201a chore(deps): update helm release openbao to v0.26.1 2026-03-17 00:01:00 +00:00
82864a4738 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8369 2026-03-17 00:00:58 +00:00
b54c05b956 add crawl4ai-proxy for openwebui 2026-03-16 20:25:30 +01:00
afdada25a0 add crawl4ai deployment 2026-03-16 19:42:01 +01:00
79315d32db add GLM-4.7-Flash model 2026-03-16 18:19:28 +01:00
a2a5cd72a9 configure open webui to use sso from authentik 2026-03-16 17:30:16 +01:00
c2706a8af2 Merge pull request 'chore(deps): update renovate/renovate docker tag to v43.76.1' (#157) from renovate/renovate-renovate-43.x into fresh-start
Reviewed-on: #157
2026-03-15 17:40:55 +00:00
610ca0017e Merge pull request 'chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8352' (#162) from renovate/ghcr.io-mostlygeek-llama-swap-198.x into fresh-start 2026-03-15 17:40:29 +00:00
466932347a chore(deps): update renovate/renovate docker tag to v43.76.1 2026-03-15 17:40:29 +00:00
afbcea4e82 chore(deps): update ghcr.io/mostlygeek/llama-swap docker tag to v198-vulkan-b8352 2026-03-15 17:40:26 +00:00
20ad26ed31 Merge pull request 'chore(deps): update alpine docker tag to v3.23' (#158) from renovate/alpine-3.x into fresh-start
Reviewed-on: #158
2026-03-15 17:38:29 +00:00
7a2d1e0437 Merge pull request 'chore(deps): update helm release openbao to v0.26.0' (#159) from renovate/openbao-0.x into fresh-start
Reviewed-on: #159
2026-03-15 17:38:19 +00:00
7d90001f18 chore(deps): update alpine docker tag to v3.23 2026-03-15 00:00:30 +00:00
493f939551 chore(deps): update helm release openbao to v0.26.0 2026-03-14 00:00:29 +00:00
37 changed files with 606 additions and 16 deletions

View File

@@ -0,0 +1,15 @@
when:
- event: push
branch: fresh-start
steps:
- name: build
image: debian
commands:
- echo "This is the build step"
- echo "echo hello world" > executable
- name: a-test-step
image: golang:1.16
commands:
- echo "Testing ..."
- sh executable

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: crawl4ai-proxy
namespace: crawl4ai
spec:
replicas: 1
selector:
matchLabels:
app: crawl4ai-proxy
template:
metadata:
labels:
app: crawl4ai-proxy
spec:
containers:
- name: crawl4ai-proxy
image: gitea.lumpiasty.xyz/lumpiasty/crawl4ai-proxy-fit:latest
imagePullPolicy: Always
env:
- name: LISTEN_PORT
value: "8000"
- name: CRAWL4AI_ENDPOINT
value: http://crawl4ai.crawl4ai.svc.cluster.local:11235/crawl
ports:
- name: http
containerPort: 8000
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: 3
periodSeconds: 10
timeoutSeconds: 2
failureThreshold: 6
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 10
periodSeconds: 15
timeoutSeconds: 2
failureThreshold: 6
resources:
requests:
cpu: 25m
memory: 32Mi
limits:
cpu: 200m
memory: 128Mi

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: crawl4ai-proxy
namespace: crawl4ai
spec:
type: ClusterIP
selector:
app: crawl4ai-proxy
ports:
- name: http
port: 8000
targetPort: 8000
protocol: TCP

View File

@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: crawl4ai
namespace: crawl4ai
spec:
replicas: 1
selector:
matchLabels:
app: crawl4ai
template:
metadata:
labels:
app: crawl4ai
spec:
containers:
- name: crawl4ai
image: unclecode/crawl4ai:latest
imagePullPolicy: IfNotPresent
env:
- name: CRAWL4AI_API_TOKEN
valueFrom:
secretKeyRef:
name: crawl4ai-secret
key: api_token
optional: false
- name: MAX_CONCURRENT_TASKS
value: "5"
ports:
- name: http
containerPort: 11235
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: "2"
memory: 4Gi
volumeMounts:
- name: dshm
mountPath: /dev/shm
volumes:
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 1Gi

View File

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- deployment.yaml
- service.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: crawl4ai

38
apps/crawl4ai/secret.yaml Normal file
View File

@@ -0,0 +1,38 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: crawl4ai-secret
namespace: crawl4ai
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: crawl4ai
namespace: crawl4ai
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: crawl4ai
serviceAccount: crawl4ai-secret
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: crawl4ai-secret
namespace: crawl4ai
spec:
type: kv-v2
mount: secret
path: crawl4ai
destination:
create: true
name: crawl4ai-secret
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: crawl4ai

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: crawl4ai
namespace: crawl4ai
spec:
type: ClusterIP
selector:
app: crawl4ai
ports:
- name: http
port: 11235
targetPort: 11235
protocol: TCP

View File

@@ -16,7 +16,7 @@ spec:
serviceAccountName: garm
initContainers:
- name: render-garm-config
image: alpine:3.21
image: alpine:3.23
env:
- name: JWT_AUTH_SECRET
valueFrom:

View File

@@ -73,7 +73,7 @@ spec:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true
webhook:
ALLOWED_HOST_LIST: garm.garm.svc.cluster.local
ALLOWED_HOST_LIST: garm.garm.svc.cluster.local,woodpecker.lumpiasty.xyz
admin:
username: GiteaAdmin
email: gi@tea.com

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: immich
version: 1.1.1
version: 1.2.2
sourceRef:
kind: HelmRepository
name: secustor

View File

@@ -1,6 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crawl4ai
- crawl4ai-proxy
- authentik
- gitea
- renovate
@@ -13,3 +15,4 @@ resources:
- ispeak3
- openwebui
- garm
- woodpecker

View File

@@ -4,12 +4,16 @@ logToStdout: "both" # proxy and upstream
macros:
base_args: "--no-warmup --port ${PORT}"
common_args: "--fit-target 1536 --fit-ctx 65536 --no-warmup --port ${PORT}"
common_args: "--fit-target 1536 --no-warmup --port ${PORT}"
gemma3_ctx_128k: "--ctx-size 131072"
qwen35_ctx_128k: "--ctx-size 131072"
qwen35_ctx_256k: "--ctx-size 262144"
gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q4_0 -ctv q4_0"
qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q4_0 -ctv q4_0"
qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0"
qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q8_0 -ctv q8_0"
qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf"
qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf"
glm47_flash_args: "--temp 0.7 --top-p 1.0 --min-p 0.01 --repeat-penalty 1.0"
thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'"
thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'"
@@ -38,6 +42,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${gemma3_ctx_128k}
${gemma_sampling}
${common_args}
@@ -45,6 +50,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${gemma3_ctx_128k}
${gemma_sampling}
--no-mmproj
${common_args}
@@ -53,6 +59,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${gemma3_ctx_128k}
${gemma_sampling}
${common_args}
@@ -60,6 +67,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${gemma3_ctx_128k}
${gemma_sampling}
--no-mmproj
${common_args}
@@ -75,13 +83,14 @@ models:
--top-p 0.95
--top-k 40
--repeat-penalty 1.0
-ctk q4_0 -ctv q4_0
-ctk q8_0 -ctv q8_0
${common_args}
"Qwen3.5-35B-A3B-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${qwen35_ctx_256k}
${qwen35_35b_args}
${common_args}
@@ -89,6 +98,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${qwen35_ctx_256k}
${qwen35_35b_args}
${common_args}
${thinking_off}
@@ -100,6 +110,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj}
${qwen35_ctx_256k}
${qwen35_35b_args}
${common_args}
@@ -108,6 +119,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj}
${qwen35_ctx_256k}
${qwen35_35b_args}
${common_args}
${thinking_off}
@@ -116,6 +128,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
${qwen35_ctx_256k}
${qwen35_sampling}
${base_args}
${thinking_on}
@@ -133,6 +146,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -141,6 +155,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -149,6 +164,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${qwen35_ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -157,6 +173,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${qwen35_ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -166,6 +183,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj}
${qwen35_ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -175,6 +193,7 @@ models:
/app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj}
${qwen35_ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -183,6 +202,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -191,6 +211,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -199,6 +220,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -207,6 +229,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
@@ -215,6 +238,7 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
@@ -223,6 +247,14 @@ models:
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${qwen35_ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
"GLM-4.7-Flash-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/GLM-4.7-Flash-GGUF:Q4_K_M
${glm47_flash_args}
${common_args}

View File

@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v198-vulkan-b8248
image: ghcr.io/mostlygeek/llama-swap:v199-vulkan-b8589
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap

View File

@@ -4,5 +4,6 @@ resources:
- namespace.yaml
- pvc.yaml
- pvc-pipelines.yaml
- secret.yaml
- release.yaml
- ingress.yaml

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: open-webui
version: 12.10.0
version: 13.0.1
sourceRef:
kind: HelmRepository
name: open-webui
@@ -44,3 +44,30 @@ spec:
persistence:
enabled: true
existingClaim: openwebui-pipelines-lvmhdd
# SSO with Authentik
extraEnvVars:
- name: WEBUI_URL
value: "https://openwebui.lumpiasty.xyz"
- name: OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
name: openwebui-authentik
key: client_id
- name: OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: openwebui-authentik
key: client_secret
- name: OAUTH_PROVIDER_NAME
value: "authentik"
- name: OPENID_PROVIDER_URL
value: "https://authentik.lumpiasty.xyz/application/o/open-web-ui/.well-known/openid-configuration"
- name: OPENID_REDIRECT_URI
value: "https://openwebui.lumpiasty.xyz/oauth/oidc/callback"
- name: ENABLE_OAUTH_SIGNUP
value: "true"
- name: ENABLE_LOGIN_FORM
value: "false"
- name: OAUTH_MERGE_ACCOUNTS_BY_EMAIL
value: "true"

View File

@@ -0,0 +1,43 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: openwebui-secret
namespace: openwebui
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: openwebui
namespace: openwebui
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: openwebui
serviceAccount: openwebui-secret
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: openwebui-authentik
namespace: openwebui
spec:
type: kv-v2
mount: secret
path: authentik/openwebui
destination:
create: true
name: openwebui-authentik
type: Opaque
transformation:
excludeRaw: true
templates:
client_id:
text: '{{ get .Secrets "client_id" }}'
client_secret:
text: '{{ get .Secrets "client_secret" }}'
vaultAuthRef: openwebui

View File

@@ -15,7 +15,7 @@ spec:
- name: renovate
# Update this to the latest available and then enable Renovate on
# the manifest
image: renovate/renovate:43.64.6-full
image: renovate/renovate:43.95.0-full
envFrom:
- secretRef:
name: renovate-gitea-token

View File

@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- release.yaml
- secret.yaml

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: woodpecker

View File

@@ -0,0 +1,23 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: woodpecker-postgresql-cluster
namespace: woodpecker
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:17.4
bootstrap:
initdb:
database: woodpecker
owner: woodpecker
storage:
pvcTemplate:
storageClassName: ssd-lvmpv
resources:
requests:
storage: 10Gi
volumeName: woodpecker-postgresql-cluster-lvmssd

View File

@@ -0,0 +1,33 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: woodpecker-postgresql-cluster-lvmssd
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-ssd$
volGroup: openebs-ssd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: woodpecker-postgresql-cluster-lvmssd
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: ssd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: woodpecker-postgresql-cluster-lvmssd
---
# PVC is dynamically created by the Postgres operator

View File

@@ -0,0 +1,115 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: woodpecker
namespace: woodpecker
spec:
interval: 24h
url: https://woodpecker-ci.org/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: woodpecker
namespace: woodpecker
spec:
interval: 30m
chart:
spec:
chart: woodpecker
version: 3.5.1
sourceRef:
kind: HelmRepository
name: woodpecker
namespace: woodpecker
interval: 12h
values:
server:
enabled: true
statefulSet:
replicaCount: 1
persistentVolume:
enabled: false # Using Postgresql database
env:
WOODPECKER_HOST: "https://woodpecker.lumpiasty.xyz"
# Gitea integration
WOODPECKER_GITEA: "true"
WOODPECKER_GITEA_URL: "https://gitea.lumpiasty.xyz"
# PostgreSQL database configuration
WOODPECKER_DATABASE_DRIVER: postgres
# Password is loaded from woodpecker-postgresql-cluster-app secret (created by CNPG)
WOODPECKER_DATABASE_DATASOURCE:
valueFrom:
secretKeyRef:
name: woodpecker-postgresql-cluster-app
key: fqdn-uri
# Allow logging in from all accounts on Gitea
WOODPECKER_OPEN: "true"
# Make lumpiasty admin
WOODPECKER_ADMIN: GiteaAdmin
createAgentSecret: true
extraSecretNamesForEnvFrom:
- woodpecker-secrets
ingress:
enabled: true
ingressClassName: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true"
hosts:
- host: woodpecker.lumpiasty.xyz
paths:
- path: /
backend:
serviceName: woodpecker-server
servicePort: 80
tls:
- hosts:
- woodpecker.lumpiasty.xyz
secretName: woodpecker-ingress
resources:
requests:
cpu: 100m
memory: 256Mi
service:
type: ClusterIP
port: 80
agent:
enabled: true
replicaCount: 2
env:
WOODPECKER_SERVER: "woodpecker-server:9000"
WOODPECKER_BACKEND: kubernetes
WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: ssd-lvmpv
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
WOODPECKER_CONNECT_RETRY_COUNT: "5"
mapAgentSecret: true
extraSecretNamesForEnvFrom:
- woodpecker-secrets
persistence:
enabled: false
serviceAccount:
create: true
rbac:
create: true
resources:
requests:
cpu: 100m
memory: 128Mi

View File

@@ -0,0 +1,62 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: woodpecker-secret
namespace: woodpecker
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: woodpecker
namespace: woodpecker
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: woodpecker
serviceAccount: woodpecker-secret
---
# Main woodpecker secrets from Vault
# Requires vault kv put secret/woodpecker \
# WOODPECKER_AGENT_SECRET="$(openssl rand -hex 32)" \
# WOODPECKER_GITEA_CLIENT="<gitea-oauth-client>" \
# WOODPECKER_GITEA_SECRET="<gitea-oauth-secret>"
# Note: Database password comes from CNPG secret (woodpecker-postgresql-cluster-app)
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: woodpecker-secrets
namespace: woodpecker
spec:
type: kv-v2
mount: secret
path: woodpecker
destination:
create: true
name: woodpecker-secrets
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: woodpecker
---
# Container registry credentials for Kaniko
# Requires vault kv put secret/container-registry \
# REGISTRY_USERNAME="<username>" \
# REGISTRY_PASSWORD="<token>"
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: container-registry
namespace: woodpecker
spec:
type: kv-v2
mount: secret
path: container-registry
destination:
create: true
name: container-registry
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: woodpecker

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: cert-manager-webhook-ovh
version: 0.9.4
version: 0.9.5
sourceRef:
kind: HelmRepository
name: cert-manager-webhook-ovh

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cert-manager
version: v1.20.0
version: v1.20.1
sourceRef:
kind: HelmRepository
name: cert-manager

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: cilium
version: 1.19.1
version: 1.19.2
sourceRef:
kind: HelmRepository
name: cilium

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: k8up
version: 4.8.6
version: 4.9.0
sourceRef:
kind: HelmRepository
name: k8up-io

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: ingress-nginx
version: 4.15.0
version: 4.15.1
sourceRef:
kind: HelmRepository
name: ingress-nginx

View File

@@ -23,7 +23,7 @@ spec:
chart:
spec:
chart: openbao
version: 0.25.7
version: 0.26.2
sourceRef:
kind: HelmRepository
name: openbao

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- crawl4ai-secret
bound_service_account_namespaces:
- crawl4ai
token_policies:
- crawl4ai

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- openwebui-secret
bound_service_account_namespaces:
- openwebui
token_policies:
- openwebui

View File

@@ -0,0 +1,6 @@
bound_service_account_names:
- woodpecker-secret
bound_service_account_namespaces:
- woodpecker
token_policies:
- woodpecker

View File

@@ -0,0 +1,3 @@
path "secret/data/crawl4ai" {
capabilities = ["read"]
}

View File

@@ -0,0 +1,3 @@
path "secret/data/authentik/openwebui" {
capabilities = ["read"]
}

View File

@@ -0,0 +1,7 @@
path "secret/data/woodpecker" {
capabilities = ["read"]
}
path "secret/data/container-registry" {
capabilities = ["read"]
}