246 Commits

Author SHA1 Message Date
ae38951164 Update Helm release k8up to v4.8.6 2025-10-04 00:00:49 +00:00
708ffe203c Add Qwen2.5-VL models 2025-09-13 02:42:21 +02:00
3ceec2f10c Merge pull request 'Update renovate/renovate Docker tag to v41.82.10' (#66) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #66
2025-08-25 00:33:25 +00:00
95cfbfbe66 Update renovate/renovate Docker tag to v41.82.10 2025-08-25 00:32:46 +00:00
bf9aefb44a remove ollama 2025-08-25 02:30:47 +02:00
5ffb171821 Merge pull request 'Update Helm release gitea to v12.2.0' (#67) from renovate/gitea-12.x into fresh-start
Reviewed-on: #67
2025-08-25 00:23:50 +00:00
a35116aa31 Merge pull request 'Update redis Docker tag to v22' (#70) from renovate/redis-22.x into fresh-start
Reviewed-on: #70
2025-08-25 00:23:19 +00:00
b32337a2ba Merge pull request 'Update Helm release ingress-nginx to v4.13.1' (#71) from renovate/ingress-nginx-4.x into fresh-start
Reviewed-on: #71
2025-08-25 00:22:58 +00:00
d27b43715c Merge pull request 'Update Helm release immich to v0.7.5' (#73) from renovate/immich-0.x into fresh-start
Reviewed-on: #73
2025-08-25 00:22:24 +00:00
4b0ce7a2e3 Merge pull request 'Update Helm release openbao to v0.16.3' (#75) from renovate/openbao-0.x into fresh-start
Reviewed-on: #75
2025-08-25 00:22:18 +00:00
7f2ef7270c Merge pull request 'Update Helm release cloudnative-pg to v0.26.0' (#72) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #72
2025-08-25 00:18:53 +00:00
73a9b275a7 Merge pull request 'Update Helm release cilium to v1.18.1' (#74) from renovate/cilium-1.x into fresh-start
Reviewed-on: #74
2025-08-25 00:17:27 +00:00
8a61a936c6 Update redis Docker tag to v22 2025-08-24 00:00:34 +00:00
1c2f77927f Update Helm release immich to v0.7.5 2025-08-23 00:00:29 +00:00
4f5b25d910 increase frigate config volume to 5Gi 2025-08-22 16:59:46 +02:00
7c5fafd54e Update Helm release openbao to v0.16.3 2025-08-22 00:00:30 +00:00
de11ec0d1b Update Helm release gitea to v12.2.0 2025-08-20 00:00:40 +00:00
07c32643e7 add searxng 2025-08-18 03:26:54 +02:00
9c61d47fda add qwen3-4b-2507 model 2025-08-18 02:50:46 +02:00
0f24f1dd7b Update Helm release cilium to v1.18.1 2025-08-16 00:00:28 +00:00
83e5cada3f decreate mtu on anapistuala delrosalae to 1280, hack 2025-08-15 20:56:12 +02:00
ccf6302924 Update Helm release cloudnative-pg to v0.26.0 2025-08-14 00:00:36 +00:00
5eb0362788 Update Helm release ingress-nginx to v4.13.1 2025-08-13 00:00:40 +00:00
0985832c2d disable gpu accel in frigate 2025-08-11 20:24:32 +02:00
db86abff25 remove old nginx ingress controller 2025-08-03 19:14:11 +02:00
a1b40a6a21 Revert "add cameras vlan"
This reverts commit 9269f21692.
2025-08-03 18:42:17 +02:00
444c4faf96 move all ingresses to new nginx ingress 2025-08-03 18:17:37 +02:00
9f304af879 update gitea to new ingress 2025-08-03 17:59:54 +02:00
c0524510b8 add nginx-ingress 2025-08-03 17:40:25 +02:00
a26a351396 update llama-swap 2025-08-03 17:16:25 +02:00
9269f21692 add cameras vlan 2025-08-03 16:39:38 +02:00
9d6a9ff304 Merge pull request 'Update Helm release immich to v0.7.2' (#65) from renovate/immich-0.x into fresh-start
Reviewed-on: #65
2025-08-03 14:00:33 +00:00
3cd094007e Merge pull request 'Update renovate/renovate Docker tag to v41.51.0' (#61) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #61
2025-08-03 14:00:19 +00:00
94a57daaf8 Merge pull request 'Update Helm release cilium to v1.18.0' (#62) from renovate/cilium-1.x into fresh-start
Reviewed-on: #62
2025-08-03 14:00:00 +00:00
6fec8d29a6 Update renovate/renovate Docker tag to v41.51.0 2025-08-03 00:00:50 +00:00
3a94da6021 Update Helm release immich to v0.7.2 2025-08-03 00:00:45 +00:00
70511ff9bc Merge pull request 'Update Helm release ollama to v1.25.0' (#63) from renovate/ollama-1.x into fresh-start
Reviewed-on: #63
2025-08-02 14:29:07 +00:00
e8b37d90d8 Merge pull request 'Update Helm release immich to v0.7.1' (#64) from renovate/immich-0.x into fresh-start
Reviewed-on: #64
2025-08-02 14:28:59 +00:00
30b7a78360 Update Helm release immich to v0.7.1 2025-08-02 00:01:07 +00:00
2561b354d1 Update Helm release ollama to v1.25.0 2025-07-30 00:00:32 +00:00
949d8b11db Update Helm release cilium to v1.18.0 2025-07-30 00:00:29 +00:00
6c46b20dba fix nginx disconnecting too fast 2025-07-29 19:49:15 +02:00
f0f9cb4d34 fix api endpoint in librechat 2025-07-29 18:54:07 +02:00
8386e21722 fix image upload in librechat 2025-07-29 18:50:13 +02:00
c871dae045 change chart source and update librechat 2025-07-29 18:36:19 +02:00
70e4967497 increase immich uploads volume 2025-07-29 04:16:28 +02:00
8e68c45573 allow websockets to immich 2025-07-29 03:25:43 +02:00
c4628523bc llama automatic unloading and longer start timeout 2025-07-29 02:31:39 +02:00
071e87ee44 disable warmups 2025-07-29 02:24:14 +02:00
9e17aadb56 add gemma3 model 2025-07-29 02:22:52 +02:00
3ca4ddc233 use immich chart provided ingress 2025-07-29 00:50:44 +02:00
215a2ac1fb Merge pull request 'Update Helm release cloudnative-pg to v0.25.0' (#59) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #59
2025-07-28 22:46:07 +00:00
5b8a861daa Merge pull request 'Update renovate/renovate Docker tag to v41.43.5' (#58) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #58
2025-07-28 22:45:53 +00:00
319e3bafbe Merge pull request 'Update Helm release immich to v0.7.0' (#60) from renovate/immich-0.x into fresh-start
Reviewed-on: #60
2025-07-28 22:45:29 +00:00
ad1c60a049 Update Helm release immich to v0.7.0 2025-07-28 00:00:34 +00:00
41020f8c79 install immich 2025-07-27 22:38:45 +02:00
60c7dd4bdc Update renovate/renovate Docker tag to v41.43.5 2025-07-27 00:00:38 +00:00
0fde3108d6 move llama models to ssd 2025-07-26 17:54:23 +02:00
a299c2cc2b add ssd 2025-07-26 17:52:34 +02:00
a4ea45a39c Update Helm release cloudnative-pg to v0.25.0 2025-07-26 00:03:30 +00:00
30bae60308 fix immich postgres cluster 2025-07-25 23:09:58 +02:00
2f3b7af0da redis for immich 2025-07-25 22:43:21 +02:00
30efd5ae6e Merge pull request 'Update renovate/renovate Docker tag to v41.43.2' (#57) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #57
2025-07-25 20:15:37 +00:00
0e1279473f Update renovate/renovate Docker tag to v41.43.2 2025-07-25 00:00:45 +00:00
718a0d7e33 add immich 2025-07-24 02:50:34 +02:00
9765f1cf86 add gemma3n 2025-07-23 23:46:44 +02:00
5f3a00b382 add qwen3 no thinking 2025-07-23 22:56:52 +02:00
b379c181f2 increase context size 2025-07-23 22:06:45 +02:00
e1801347f2 add qwen3 2025-07-23 20:15:37 +02:00
d53db88fd2 gpu offload in llama.cpp 2025-07-23 19:55:48 +02:00
5fb2bcfc7e add llama.cpp to librechat 2025-07-23 19:19:43 +02:00
f5da3b52a2 Merge pull request 'Update Helm release ollama to v1.24.0' (#53) from renovate/ollama-1.x into fresh-start
Reviewed-on: #53
2025-07-23 17:13:28 +00:00
c3dbb0a608 Merge pull request 'Update Helm release openbao to v0.16.2' (#52) from renovate/openbao-0.x into fresh-start
Reviewed-on: #52
2025-07-23 17:13:09 +00:00
a520c62277 Merge pull request 'Update renovate/renovate Docker tag to v41.42.9' (#51) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #51
2025-07-23 17:12:49 +00:00
6cf45eda17 Merge pull request 'Update Helm release cilium to v1.17.6' (#55) from renovate/cilium-1.x into fresh-start
Reviewed-on: #55
2025-07-23 17:12:35 +00:00
753d43b643 Merge pull request 'Update Helm release nginx-ingress to v2.2.1' (#54) from renovate/nginx-ingress-2.x into fresh-start
Reviewed-on: #54
2025-07-23 17:12:11 +00:00
263b60018d Merge pull request 'Update Helm release gitea to v12.1.2' (#56) from renovate/gitea-12.x into fresh-start
Reviewed-on: #56
2025-07-23 17:10:14 +00:00
0816b6e434 Update renovate/renovate Docker tag to v41.42.9 2025-07-23 00:01:56 +00:00
18eb912f03 llama-swap 2025-07-23 00:18:45 +02:00
a2c23c5f97 Update Helm release gitea to v12.1.2 2025-07-20 00:00:54 +00:00
15ce411c3e Update Helm release nginx-ingress to v2.2.1 2025-07-18 00:00:57 +00:00
04a8c98d63 Update Helm release cilium to v1.17.6 2025-07-17 00:00:45 +00:00
f46219f87e Update Helm release ollama to v1.24.0 2025-07-13 00:00:52 +00:00
53154eeed7 adjust motion masks 2025-07-10 22:06:58 +02:00
2ad310c550 Update Helm release openbao to v0.16.2 2025-07-10 00:00:37 +00:00
d32d94eb00 introduce person mask 2025-07-07 00:02:09 +02:00
5b62f7e386 Merge pull request 'Update renovate/renovate Docker tag to v41.23.1' (#48) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #48
2025-07-06 18:40:21 +00:00
52124193e2 Merge pull request 'Update Helm release ollama to v1.23.0' (#49) from renovate/ollama-1.x into fresh-start
Reviewed-on: #49
2025-07-06 18:40:12 +00:00
0f8ee9e53d Merge pull request 'Update Helm release cert-manager to v1.18.2' (#50) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #50
2025-07-06 18:40:05 +00:00
122c70d0a8 fix config validation error 2025-07-06 20:06:30 +02:00
5463d76771 run renovate once daily 2025-07-06 20:03:42 +02:00
60f2056806 update nix flake 2025-07-06 19:48:03 +02:00
6119ac7271 Update renovate/renovate Docker tag to v41.23.1 2025-07-06 17:00:32 +00:00
1a01f82e30 tune detection objects and retention 2025-07-06 18:58:29 +02:00
74c9ddad62 add motion mask on cameras 2025-07-06 18:15:41 +02:00
caf62609d3 Update Helm release ollama to v1.23.0 2025-07-05 05:00:41 +00:00
d5622416de Update Helm release cert-manager to v1.18.2 2025-07-02 14:00:59 +00:00
4183831d2f fix expanding volumes 2025-06-30 18:40:14 +02:00
ae6ed770a9 increase storage for recordings 2025-06-30 18:34:57 +02:00
59d936d467 enable audio in recordings frigate 2025-06-30 00:02:09 +02:00
9b56ce5e4f switch to openvino cpu detector 2025-06-29 22:44:17 +02:00
2424ad440b enable hwaccel in frigate 2025-06-29 20:33:42 +02:00
dff138ba31 use go2rtc restream to remove need for two streams from camera 2025-06-29 17:25:18 +02:00
d95eb6f4ab Configure frigate webrtc 2025-06-29 02:10:41 +02:00
5252f209f5 enable ingress to frigate 2025-06-29 01:14:26 +02:00
e7348b2718 add cameras to frigate 2025-06-29 00:34:01 +02:00
c7cd2c5355 add frigate nvr 2025-06-28 02:41:52 +02:00
71e75afadb Merge pull request 'Update Helm release cert-manager-webhook-ovh to v0.7.5' (#39) from renovate/cert-manager-webhook-ovh-0.x into fresh-start
Reviewed-on: #39
2025-06-28 00:06:37 +00:00
23169aa2ca Merge pull request 'Update Helm release cloudnative-pg to v0.24.0' (#38) from renovate/cloudnative-pg-0.x into fresh-start
Reviewed-on: #38
2025-06-28 00:06:30 +00:00
d8aa0a6a32 Merge pull request 'Update Helm release ollama to v1.21.0' (#40) from renovate/ollama-1.x into fresh-start
Reviewed-on: #40
2025-06-27 23:59:33 +00:00
a6630c0376 fix openbao injector not starting 2025-06-28 01:57:25 +02:00
9056839784 Merge pull request 'Update Helm release openbao to v0.16.1' (#41) from renovate/openbao-0.x into fresh-start
Reviewed-on: #41
2025-06-27 23:47:11 +00:00
1f8afa2f8e Merge pull request 'Update Helm release cert-manager to v1.18.1' (#42) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #42
2025-06-27 23:46:30 +00:00
e7b22509cd Merge pull request 'Update renovate/renovate Docker tag to v41' (#47) from renovate/renovate-renovate-41.x into fresh-start
Reviewed-on: #47
2025-06-27 23:42:09 +00:00
e39574b60e Update renovate/renovate Docker tag to v41 2025-06-27 23:41:30 +00:00
197ceb6688 fix openebs after update 2025-06-28 01:37:40 +02:00
3e95a5edd1 Merge pull request 'Update Helm release openebs to v4.3.2' (#43) from renovate/openebs-4.x into fresh-start
Reviewed-on: #43
2025-06-27 21:38:27 +00:00
10fe51f52d Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.12.2' (#44) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #44
2025-06-27 21:33:44 +00:00
e197cf5e5e Merge pull request 'Update Helm release gitea to v12.1.1' (#45) from renovate/gitea-12.x into fresh-start
Reviewed-on: #45
2025-06-27 21:31:42 +00:00
c54109dbf3 Merge pull request 'Update Helm release cilium to v1.17.5' (#46) from renovate/cilium-1.x into fresh-start
Reviewed-on: #46
2025-06-27 21:29:36 +00:00
5a97e4b1d8 Update Helm release openebs to v4.3.2 2025-06-27 20:28:16 +00:00
12cdfd96e2 Update Helm release openbao to v0.16.1 2025-06-27 20:28:13 +00:00
7d2056b3ee Update Helm release ollama to v1.21.0 2025-06-27 20:28:09 +00:00
35e579fc01 Update Helm release gitea to v12.1.1 2025-06-27 20:28:01 +00:00
302613b76a Update Helm release cert-manager to v1.18.1 2025-06-27 20:27:56 +00:00
89542df777 Update Helm release cilium to v1.17.5 2025-06-27 20:27:51 +00:00
233466e2cd Update Helm release cert-manager-webhook-ovh to v0.7.5 2025-06-27 20:27:48 +00:00
461f0589b3 Update registry.k8s.io/coredns/coredns Docker tag to v1.12.2 2025-06-16 09:00:47 +00:00
5c16cd3a4b Update Helm release cloudnative-pg to v0.24.0 2025-05-23 14:00:45 +00:00
5cd5263d19 Merge pull request 'Update Helm release cilium to v1.17.4' (#34) from renovate/cilium-1.x into fresh-start
Reviewed-on: #34
2025-05-17 22:00:56 +00:00
a886e7c79c Merge pull request 'Update renovate/renovate Docker tag to v40.14.3' (#33) from renovate/renovate-renovate-40.x into fresh-start
Reviewed-on: #33
2025-05-17 22:00:49 +00:00
dd676716f9 fix valkey persistence in gitea chart 2025-05-17 23:54:04 +02:00
110ffa9c22 Merge pull request 'Update Helm release gitea to v12' (#35) from renovate/gitea-12.x into fresh-start
Reviewed-on: #35
2025-05-17 21:46:57 +00:00
6ed7d61e21 rename mentions of redis to valkey in gitea 2025-05-17 23:46:35 +02:00
051083cd6e Merge pull request 'Update Helm release ollama to v1.17.0' (#36) from renovate/ollama-1.x into fresh-start
Reviewed-on: #36
2025-05-17 21:40:40 +00:00
87f2446cd1 move ollama api key to valut 2025-05-17 23:32:33 +02:00
faa55fa069 move ovh cert-manager secret to vault 2025-05-17 23:12:42 +02:00
af29de91d6 move renovate gitea token to vault 2025-05-17 22:58:43 +02:00
5f3a775201 move some settings of renovate to configmap 2025-05-17 22:45:43 +02:00
81f750e5e5 Update renovate/renovate Docker tag to v40.14.3 2025-05-17 19:00:49 +00:00
641e50b5e9 Update Helm release ollama to v1.17.0 2025-05-17 03:00:44 +00:00
3fe8626391 Update Helm release gitea to v12 2025-05-16 14:00:56 +00:00
94f851c607 Update Helm release cilium to v1.17.4 2025-05-15 19:00:42 +00:00
d2134ad554 Merge pull request 'Update renovate/renovate Docker tag to v40.11.6' (#32) from renovate/renovate-renovate-40.x into fresh-start
Reviewed-on: #32
2025-05-12 00:16:15 +00:00
22910085b7 add vault secret of gitea backups 2025-05-12 02:08:32 +02:00
6a4dee0852 add vault secrets operator 2025-05-12 02:05:36 +02:00
49d5803b4f add external-secrets 2025-05-12 00:42:56 +02:00
b5c51f6720 Update renovate/renovate Docker tag to v40.11.6 2025-05-11 11:00:42 +00:00
3a8dbc6e0c Merge pull request 'Update Helm release ollama to v1.16.0' (#30) from renovate/ollama-1.x into fresh-start
Reviewed-on: #30
2025-05-10 00:13:08 +00:00
ead8be8bcb Merge pull request 'Update Helm release cert-manager to v1.17.2' (#28) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #28
2025-05-10 00:13:02 +00:00
f027dad029 Merge pull request 'Update caddy Docker tag to v2.10.0' (#26) from renovate/caddy-2.x into fresh-start
Reviewed-on: #26
2025-05-10 00:12:41 +00:00
e35b8ccac8 Merge pull request 'Update Helm release librechat to v1.8.10' (#29) from renovate/librechat-1.x into fresh-start
Reviewed-on: #29
2025-05-10 00:12:32 +00:00
f69128b245 Merge pull request 'Update renovate/renovate Docker tag to v40' (#31) from renovate/renovate-renovate-40.x into fresh-start
Reviewed-on: #31
2025-05-10 00:12:02 +00:00
d14b62f384 pin cores to minimum frequency 2025-05-10 01:43:20 +02:00
ab7b8a6f26 Update renovate/renovate Docker tag to v40 2025-05-09 13:00:22 +00:00
8acc480b05 Update Helm release ollama to v1.16.0 2025-05-06 02:00:31 +00:00
65834037ee Update Helm release librechat to v1.8.10 2025-04-24 19:00:25 +00:00
1bf63168f2 Update Helm release cert-manager to v1.17.2 2025-04-24 12:00:33 +00:00
b3db332075 Update caddy Docker tag to v2.10.0 2025-04-22 01:00:33 +00:00
b84c792992 add basedpyright and make it happy 2025-04-22 02:42:16 +02:00
947f154a81 use nix provided python as default interpreter 2025-04-21 23:01:58 +02:00
1a88b1c602 synchronize kubernetes auth method in recoincile script 2025-04-21 22:09:13 +02:00
55fce1fc36 gitea switch to database from cloudnativepg 2025-04-21 21:16:02 +02:00
bb4afc0c07 increase ollama proxy-read-timeout on ingress 2025-04-21 19:59:03 +02:00
eb92a85cac fix apps kustomization 2025-04-21 17:54:30 +02:00
8f70ae5f2e Merge pull request 'Update renovate/renovate Docker tag to v39.253.2' (#22) from renovate/renovate-renovate-39.x into fresh-start
Reviewed-on: #22
2025-04-21 15:52:55 +00:00
f89a2fd1cc Merge pull request 'Update Helm release cilium to v1.17.3' (#23) from renovate/cilium-1.x into fresh-start
Reviewed-on: #23
2025-04-21 15:52:34 +00:00
b493ee9d77 Merge pull request 'Update Helm release nginx-ingress to v2.1.0' (#25) from renovate/nginx-ingress-2.x into fresh-start
Reviewed-on: #25
2025-04-21 15:52:19 +00:00
8de0663571 Merge pull request 'Update Helm release openbao to v0.12.0' (#24) from renovate/openbao-0.x into fresh-start
Reviewed-on: #24
2025-04-21 15:52:09 +00:00
3fc534f44b remove gpt-researcher 2025-04-21 17:48:08 +02:00
1c8ccd0fc4 Update renovate/renovate Docker tag to v39.253.2 2025-04-21 10:00:40 +00:00
847fd3557b use tavily and openrouter in gpt researcher 2025-04-20 03:06:46 +02:00
d2c2f5038f change models used by gpt-researcher 2025-04-20 00:19:34 +02:00
afb9dcec65 enable support for websockets for researcher 2025-04-19 05:21:29 +02:00
ba51980cec use our own image for gpt researcher 2025-04-19 04:49:55 +02:00
e0eb26b63d add docker registry 2025-04-19 04:43:27 +02:00
eda5ba08a0 add gpt-researcher 2025-04-19 04:07:21 +02:00
318aedf89d update network config 2025-04-17 22:35:53 +02:00
7b9090afc1 Update Helm release nginx-ingress to v2.1.0 2025-04-16 15:00:30 +00:00
a109290c18 increase ollama proxy timeout 2025-04-15 23:28:03 +02:00
f4b9742ab1 Update Helm release openbao to v0.12.0 2025-04-15 20:00:29 +00:00
b103358816 Update Helm release cilium to v1.17.3 2025-04-14 21:00:32 +00:00
46cacb339d Merge pull request 'Update renovate/renovate Docker tag to v39.240.1' (#18) from renovate/renovate-renovate-39.x into fresh-start
Reviewed-on: #18
2025-04-13 00:13:01 +00:00
1e7dd52721 Merge pull request 'Update Helm release ollama to v1.14.0' (#19) from renovate/ollama-1.x into fresh-start
Reviewed-on: #19
2025-04-13 00:12:53 +00:00
044cc37392 Merge pull request 'Update registry.k8s.io/coredns/coredns Docker tag to v1.12.1' (#20) from renovate/registry.k8s.io-coredns-coredns-1.x into fresh-start
Reviewed-on: #20
2025-04-13 00:07:17 +00:00
68ba891abc Merge pull request 'Update Helm release community-operator to v0.13.0' (#21) from renovate/community-operator-0.x into fresh-start
Reviewed-on: #21
2025-04-13 00:07:04 +00:00
81ed455ff8 Update renovate/renovate Docker tag to v39.240.1 2025-04-12 19:00:28 +00:00
b7c2da4419 Update Helm release community-operator to v0.13.0 2025-04-11 19:00:24 +00:00
4bc01e2e78 disable proxy bufferring in ollama ingress 2025-04-11 03:24:45 +02:00
94d51de471 Update registry.k8s.io/coredns/coredns Docker tag to v1.12.1 2025-04-08 20:00:30 +00:00
dc0104c55d Update Helm release ollama to v1.14.0 2025-04-08 13:00:44 +00:00
83be6619e8 deploy gitea postgres cluster 2025-04-05 22:34:57 +02:00
48ccacefdd Fix librechat kustomization typo 2025-04-05 22:12:40 +02:00
cfeef90515 Split renovate deployment to files 2025-04-05 22:11:37 +02:00
ce0bef4970 Split librechat deployment to files 2025-04-05 22:09:59 +02:00
bd5fd97ed0 split ollama deployment to files 2025-04-05 22:08:02 +02:00
52641779bc split gitea deployment to files 2025-04-05 22:01:53 +02:00
e98e02705d Move gitea kustomization to subdir 2025-04-05 20:22:29 +02:00
3c849f52f7 install cloudnativepg 2025-04-05 20:05:54 +02:00
36187fff41 Merge pull request 'Update renovate/renovate Docker tag to v39.233.3' (#15) from renovate/renovate-renovate-39.x into fresh-start
Reviewed-on: #15
2025-04-05 13:37:14 +00:00
1ac7504585 Merge pull request 'Update Helm release community-operator to v0.12.1' (#16) from renovate/community-operator-0.x into fresh-start
Reviewed-on: #16
2025-04-05 13:36:59 +00:00
879c013e89 Merge pull request 'Update Helm release ollama to v1.13.0' (#17) from renovate/ollama-1.x into fresh-start
Reviewed-on: #17
2025-04-05 13:36:35 +00:00
aa7fe8d3cf enable search in librechat 2025-04-05 03:56:02 +02:00
fd280f1fca add ingress to librechat 2025-04-05 03:54:11 +02:00
2ad381e35c Install librechat from different chart 2025-04-05 02:59:41 +02:00
e63a285dc3 Remove old librechat deployment 2025-04-04 23:01:49 +02:00
5336df3134 Update renovate/renovate Docker tag to v39.233.3 2025-04-04 12:00:48 +00:00
966639e3c8 Update Helm release ollama to v1.13.0 2025-04-04 04:00:32 +00:00
97924a8064 Update Helm release community-operator to v0.12.1 2025-04-01 09:00:25 +00:00
37b78f079e Add librechat 2025-04-01 02:55:59 +02:00
0d17825eab Add mongodb database for librechat 2025-04-01 00:35:50 +02:00
ffeecf65f6 Mongodb operator 2025-03-31 23:38:58 +02:00
fea49ae167 Merge pull request 'Update renovate/renovate Docker tag to v39.221.0' (#14) from renovate/renovate-renovate-39.x into fresh-start
Reviewed-on: #14
2025-03-30 16:31:27 +00:00
6b6e7937c1 Update renovate/renovate Docker tag to v39.221.0 2025-03-30 13:00:33 +00:00
487baa2813 vulkan support in ollama 2025-03-30 03:05:51 +02:00
fe2f79d13c Disable flux network policy 2025-03-29 23:12:35 +01:00
c3a747c03c Merge pull request 'Update renovate/renovate Docker tag to v39.220.4' (#12) from renovate/renovate-renovate-39.x into fresh-start
Reviewed-on: #12
2025-03-29 22:10:11 +00:00
f1f6ffb9a0 Merge pull request 'Update Helm release ollama to v1.12.0' (#13) from renovate/ollama-1.x into fresh-start
Reviewed-on: #13
2025-03-29 22:10:03 +00:00
e851f6ab8c Update Helm release ollama to v1.12.0 2025-03-29 17:00:29 +00:00
2ecd20c9d7 Update renovate/renovate Docker tag to v39.220.4 2025-03-29 14:00:39 +00:00
bdb3bd3234 Ollama proxy fix secret ref 2025-03-27 01:47:23 +01:00
47e957e444 add cert-manager annotation to ollama ingress 2025-03-27 01:34:23 +01:00
b2dfb2dc0b disable https for caddy 2025-03-27 01:32:37 +01:00
6ccc964c87 add ollama proxy and ingress 2025-03-27 01:30:12 +01:00
5c7b258ccf Merge pull request 'Update renovate/renovate Docker tag to v39.218.1' (#10) from renovate/renovate-renovate-39.x into fresh-start
Reviewed-on: #10
2025-03-26 23:13:23 +00:00
351426f055 Merge pull request 'Update Helm release gitea to v11.0.1' (#11) from renovate/gitea-11.x into fresh-start
Reviewed-on: #11
2025-03-26 23:12:11 +00:00
ca598f9750 Update Helm release gitea to v11.0.1 2025-03-26 18:00:58 +00:00
0cb93ce8a1 Update renovate/renovate Docker tag to v39.218.1 2025-03-26 17:00:31 +00:00
6fde991ba9 add ollama deployment 2025-03-26 02:17:53 +01:00
5f3840cc02 Reapply "Merge pull request 'Update Helm release gitea to v11' (#9) from renovate/gitea-11.x into fresh-start"
This reverts commit d9a22723ef.
2025-03-26 01:48:36 +01:00
d9a22723ef Revert "Merge pull request 'Update Helm release gitea to v11' (#9) from renovate/gitea-11.x into fresh-start"
This reverts commit f97a655ad5, reversing
changes made to f36ce88026.
2025-03-26 01:16:23 +01:00
f97a655ad5 Merge pull request 'Update Helm release gitea to v11' (#9) from renovate/gitea-11.x into fresh-start
Reviewed-on: #9
2025-03-26 00:07:23 +00:00
c2aacd0ef4 Remove custom gitea tag from values 2025-03-26 01:06:24 +01:00
f36ce88026 Merge pull request 'Update Helm release openebs to v4.2.0' (#7) from renovate/openebs-4.x into fresh-start
Reviewed-on: #7
2025-03-26 00:01:50 +00:00
d19d332b59 Merge pull request 'Update renovate/renovate Docker tag to v39.216.1' (#8) from renovate/renovate-renovate-39.x into fresh-start
Reviewed-on: #8
2025-03-26 00:00:00 +00:00
5cf9de7997 renovate improve yaml matching 2025-03-26 00:58:03 +01:00
3c84632a2d Merge pull request 'Update Helm release openbao to v0.10.1' (#6) from renovate/openbao-0.x into fresh-start
Reviewed-on: #6
2025-03-25 23:54:58 +00:00
14bcc8546c Merge pull request 'Update Helm release k8up to v4.8.4' (#4) from renovate/k8up-4.x into fresh-start
Reviewed-on: #4
2025-03-25 23:53:54 +00:00
ca8a63fdbe Merge pull request 'Update Helm release cert-manager to v1.17.1' (#3) from renovate/cert-manager-1.x into fresh-start
Reviewed-on: #3
2025-03-25 23:44:47 +00:00
3a46d17f02 Update Helm release gitea to v11 2025-03-25 23:42:27 +00:00
add851ee9e Update renovate/renovate Docker tag to v39.216.1 2025-03-25 23:42:26 +00:00
edbfd26bde Update Helm release openebs to v4.2.0 2025-03-25 23:42:25 +00:00
dea0dfb7cc Update Helm release openbao to v0.10.1 2025-03-25 23:42:24 +00:00
874fc826cd Update Helm release k8up to v4.8.4 2025-03-25 23:42:21 +00:00
33cb5c72c7 Update Helm release cert-manager to v1.17.1 2025-03-25 23:42:20 +00:00
31df54fcf0 Merge pull request 'Configure Renovate' (#2) from renovate/configure into fresh-start
Reviewed-on: #2
2025-03-25 23:41:34 +00:00
169 changed files with 901 additions and 4931 deletions

12
.envrc
View File

@@ -1,12 +0,0 @@
#!/usr/bin/env bash
export DIRENV_WARN_TIMEOUT=20s
eval "$(devenv direnvrc)"
# `use devenv` supports the same options as the `devenv shell` command.
#
# To silence all output, use `--quiet`.
#
# Example usage: use devenv --quiet --impure --option services.postgres.enable:bool true
use devenv

13
.gitignore vendored
View File

@@ -1,13 +1,2 @@
secrets.yaml
talos/generated
# Devenv
.devenv*
devenv.local.nix
devenv.local.yaml
# direnv
.direnv
# pre-commit
.pre-commit-config.yaml
.opencode
talos/generated

3
.gitmodules vendored Normal file
View File

@@ -0,0 +1,3 @@
[submodule "openwrt/roles/ansible-openwrt"]
path = openwrt/roles/ansible-openwrt
url = https://github.com/gekmihesg/ansible-openwrt.git

View File

@@ -1,8 +1,7 @@
{
"recommendations": [
"arrterian.nix-env-selector",
"jnoortheen.nix-ide",
"detachhead.basedpyright",
"mkhl.direnv",
"mermaidchart.vscode-mermaid-chart"
"detachhead.basedpyright"
]
}

View File

@@ -1,4 +1,13 @@
{
"nixEnvSelector.nixFile": "${workspaceFolder}/shell.nix",
"terminal.integrated.profiles.linux": {
"Nix Shell": {
"path": "nix",
"args": ["develop"],
"icon": "terminal-linux"
}
},
"terminal.integrated.defaultProfile.linux": "Nix Shell",
"ansible.python.interpreterPath": "/bin/python",
"python.defaultInterpreterPath": "${env:PYTHON_BIN}"
}

View File

@@ -1,49 +0,0 @@
when:
- event: push
branch: fresh-start
skip_clone: true
steps:
- name: Get kubernetes access from OpenBao
image: quay.io/openbao/openbao:2.5.2
environment:
VAULT_ADDR: https://openbao.lumpiasty.xyz:8200
ROLE_ID:
from_secret: flux_reconcile_role_id
SECRET_ID:
from_secret: flux_reconcile_secret_id
commands:
- bao write -field token auth/approle/login
role_id=$ROLE_ID
secret_id=$SECRET_ID > /woodpecker/.vault_id
- export VAULT_TOKEN=$(cat /woodpecker/.vault_id)
- bao write -format json -f /kubernetes/creds/flux-reconcile > /woodpecker/kube_credentials
- name: Construct Kubeconfig
image: alpine/k8s:1.32.13
environment:
KUBECONFIG: /woodpecker/kubeconfig
commands:
- kubectl config set-cluster cluster
--server=https://$KUBERNETES_SERVICE_HOST
--certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- kubectl config set-credentials cluster
--token=$(jq -r .data.service_account_token /woodpecker/kube_credentials)
- kubectl config set-context cluster
--cluster cluster
--user cluster
--namespace flux-system
- kubectl config use-context cluster
- name: Reconcile git source
image: ghcr.io/fluxcd/flux-cli:v2.8.3
environment:
KUBECONFIG: /woodpecker/kubeconfig
commands:
- flux reconcile source git flux-system
- name: Invalidate OpenBao token
image: quay.io/openbao/openbao:2.5.2
environment:
VAULT_ADDR: https://openbao.lumpiasty.xyz:8200
commands:
- export VAULT_TOKEN=$(cat /woodpecker/.vault_id)
- bao write -f auth/token/revoke-self

View File

@@ -1,7 +1,3 @@
SHELL := /usr/bin/env bash
.PHONY: install-router gen-talos-config apply-talos-config get-kubeconfig
install-router:
ansible-playbook ansible/playbook.yml -i ansible/hosts
@@ -24,6 +20,3 @@ gen-talos-config:
apply-talos-config:
talosctl -n anapistula-delrosalae apply-config -f talos/generated/anapistula-delrosalae.yaml
get-kubeconfig:
talosctl -n anapistula-delrosalae kubeconfig talos/generated/kubeconfig

363
README.md
View File

@@ -1,293 +1,106 @@
# Homelab
This repo contains configuration and documentation for my homelab setup, which is based on Talos OS for Kubernetes cluster and MikroTik router.
## Goals
## Architecture
Wanting to set up homelab kubernetes cluster.
Physical setup consists of MikroTik router which connects to the internet and serves as a gateway for the cluster and other devices in the home network as shown in the diagram below.
### Software
```mermaid
%%{init: {"flowchart": {"ranker": "tight-tree"}}}%%
flowchart TD
subgraph internet[Internet]
ipv4[IPv4 Internet]
ipv6[IPv6 Internet]
he_tunnel[Hurricane Electric IPv6 Tunnel Broker]
isp[ISP]
end
subgraph home[Home network]
router[MikroTik Router]
cluster[Talos cluster]
lan[LAN]
mgmt[Management network]
cam[Camera system]
router --> lan
router --> cluster
router --> mgmt
router --> cam
end
ipv4 -- "Public IPv4 address" --> isp
ipv6 -- "Routed /48 IPv6 prefix" --> he_tunnel -- "6in4 Tunnel" --> isp
isp --> router
```
Devices are separated into VLANs and subnets for isolation and firewalling between devices and services. Whole internal network is configured to eliminate NAT where unnecessary. Pods on the Kubernetes cluster communicate with the router using native IP routing, there is no encapsulation, overlay network nor NAT on the nodes. Router knows where to direct packets destined for the pods because the cluster announces its IP prefixes to the router using BGP. Router also performs NAT for IPv4 traffic from the cluster to and from the internet, while IPv6 traffic is routed directly to the internet without NAT. High level logical routing diagram is shown below.
```mermaid
flowchart TD
isp[ISP] --- gpon
subgraph device[MikroTik CRS418-8P-8G-2s+]
direction TB
gpon[SFP GPON ONU]
pppoe[PPPoE client]
he_tunnel[HE Tunnel]
router[Router]@{ shape: cyl }
dockers["""
Dockers Containers (bridge)
2001:470:61a3:500::/64
172.17.0.0/16
"""]@{ shape: cloud }
tailscale["Tailscale Container"]
lan["""
LAN (vlan2)
2001:470:61a3::/64
192.168.0.0/24
"""]@{ shape: cloud }
mgmt["""
Management network (vlan1)
192.168.255.0/24
"""]@{ shape: cloud }
cam["""
Camera system (vlan3)
192.168.3.0/24
"""]@{ shape: cloud }
cluster["""
Kubernetes cluster (vlan4)
2001:470:61a3:100::/64
192.168.1.0/24
"""]@{ shape: cloud }
gpon --- pppoe -- """
139.28.40.212
Default IPv4 gateway
""" --- router
pppoe --- he_tunnel -- """
2001:470:61a3:: incoming
Default IPv6 gateway
""" --- router
router -- """
2001:470:61a3:500:ffff:ffff:ffff:ffff
172.17.0.1/16
""" --- dockers --- tailscale
router -- """
2001:470:61a3:0:ffff:ffff:ffff:ffff
192.168.0.1
"""--- lan
router -- """
192.168.255.10
"""--- mgmt
router -- "192.168.3.1" --- cam
router -- """
2001:470:61a3:100::1
192.168.1.1
""" --- cluster
end
subgraph k8s[K8s cluster]
direction TB
pod_network["""
Pod networks
2001:470:61a3:200::/104
10.42.0.0/16
(Dynamically allocated /120 IPv6 and /24 IPv4 prefixes per node)
"""]@{ shape: cloud }
service_network["""
Service network
2001:470:61a3:300::/112
10.43.0.0/16
(Advertises vIP addresses via BGP from nodes hosting endpoints)
"""]@{ shape: cloud }
load_balancer["""
Load balancer network
2001:470:61a3:400::/112
10.44.0.0/16
(Advertises vIP addresses via BGP from nodes hosting endpoints)
"""]@{ shape: cloud }
end
cluster -- "Routes exported via BGP" ----- k8s
```
Currently the k8s cluster consists of single node (hostname anapistula-delrosalae), which is a PC with Ryzen 5 3600, 64GB RAM, RX 580 8GB (for accelerating LLMs), 1TB NVMe SSD, 2TB and 3TB HDDs and serves both as control plane and worker node.
## Software stack
The cluster itself is based on [Talos Linux](https://www.talos.dev/) (which is also a Kubernetes distribution) and uses [Cilium](https://cilium.io/) as CNI, IPAM, kube-proxy replacement, Load Balancer, and BGP control plane. Persistent volumes are managed by [OpenEBS LVM LocalPV](https://openebs.io/docs/user-guides/local-storage-user-guide/local-pv-lvm/lvm-overview). Applications are deployed using GitOps (this repo) and reconciled on cluster using [Flux](https://fluxcd.io/). Git repository is hosted on [Gitea](https://gitea.io/) running on a cluster itself. Secrets are kept in [OpenBao](https://openbao.org/) (HashiCorp Vault fork) running on a cluster and synced to cluster objects using [Vault Secrets Operator](https://github.com/hashicorp/vault-secrets-operator). Deployments are kept up to date using self-hosted [Renovate](https://www.mend.io/renovate/) bot updating manifests in the Git repository. There is a [Woodpecker](https://woodpecker-ci.org/) instance watching repositories on Gitea and scheduling jobs on cluster. Incoming HTTP traffic is routed to cluster using [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) and certificates are issued by [cert-manager](https://cert-manager.io/) with [Let's Encrypt](https://letsencrypt.org/) ACME issuer with [cert-manager-webhook-ovh](https://github.com/aureq/cert-manager-webhook-ovh) resolving DNS-01 challenges. Cluster also runs [CloudNativePG](https://cloudnative-pg.io/) operator for managing PostgreSQL databases. Router is running [Mikrotik RouterOS](https://help.mikrotik.com/docs/spaces/ROS/pages/328059/RouterOS) and its configuration is managed via [Ansible](https://docs.ansible.com/) playbook in this repo. High level core cluster software architecture is shown on the diagram below.
> Talos Linux is an immutable Linux distribution purpose-built for running Kubernetes. The OS is distributed as an OCI (Docker) image and does not contain any package manager, shell, SSH, or any other tools for managing the system. Instead, all operations are performed using API, which can be accessed using `talosctl` CLI tool.
```mermaid
flowchart TD
router[MikroTik Router]
router -- "Routes HTTP traffic" --> nginx
cilium -- "Announces routes via BGP" --> router
subgraph cluster[K8s cluster]
direction TB
flux[Flux CD] -- "Reconciles manifests" --> kubeapi[Kube API Server]
flux -- "Fetches Git repo" --> gitea[Gitea]
kubeapi -- "Configs, Services, Pods" --> cilium[Cilium]
cilium -- "Routing" --> services[Services] -- "Endpoints" --> pods[Pods]
cilium -- "Configures routing, interfaces, IPAM" --> pods[Pods]
kubeapi -- "Ingress rules" --> nginx[NGINX Ingress Controller] -- "Routes HTTP traffic" ---> pods
kubeapi -- "Certificate requests" --> cert_manager[cert-manager] -- "Provides certificates" --> nginx
cert_manager -- "ACME DNS-01 challenges" --> dns_webhook[cert-manager-webhook-ovh] -- "Resolves DNS challenges" --> ovh[OVH DNS]
cert_manager -- "Requests DNS-01 challenges" --> acme[Let's Encrypt ACME server] -- "Verifies domain ownership" --> ovh
kubeapi -- "Assigns pods" --> kubelet[Kubelet] -- "Manages" --> pods
kubeapi -- "PVs, LvmVols" --> openebs[OpenEBS LVM LocalPV]
openebs -- "Mounts volumes" --> pods
openebs -- "Manages" --> lv[LVM LVs]
kubeapi -- "Gets Secret refs" --> vault_operator[Vault Secrets Operator] -- "Syncs secrets" --> kubeapi
vault_operator -- "Retrieves secrets" --> vault[OpenBao] -- "Secret storage" --> lv
vault -- "Auth method" --> kubeapi
gitea -- "Receives events" --> woodpecker[Woodpecker CI] -- "Schedules jobs" --> kubeapi
gitea -- "Stores repositories" --> lv
gitea--> renovate[Renovate Bot] -- "Updates manifests" --> gitea
end
```
### Reconciliation paths of each component
- Kubernetes manifests are reconciled using Flux triggered by Woodpecker CI on push
- RouterOS configs are applied by Ansible <!-- ran by Gitea Action on push -->
- Talos configs are applied using makefile <!-- switch to ansible and trigger on action push -->
- Vault policies are applied by running `synchronize-vault.py` <!-- triggered by Gitea action on push -->
<!-- - Docker images are built and pushed to registry by Gitea Actions on push -->
<!-- TODO: Backups, monitoring, logging, deployment with ansible etc -->
## Software
1. Running applications
1. NAS, backups, security recorder
2. Online presence, website, email, communicators (ts3, matrix?)
3. Git server, container registry
4. Environment to deploy my own apps
5. Some LLM server, apps for my own use
6. Public services like Tor, mirrors of linux distros etc.
7. [Some frontends](https://libredirect.github.io/)
8. [Awesome-Selfhosted](https://github.com/awesome-selfhosted/awesome-selfhosted), [Awesome Sysadmin](https://github.com/awesome-foss/awesome-sysadmin)
2. Managing them hopefully using GitOps
1. FluxCD, Argo etc.
2. State of cluster in git, all apps version pinned
3. Some bot to inform about updates?
3. It's a home**lab**
1. Should be open to experimenting
2. Avoiding vendor lock-in, changing my mind shouldn't block me for too long
3. Backups of important data in easy to access format
4. Expecting downtime, no critical workloads
5. Trying to keep it reasonably up anyways
### Infrastructure
### Operating systems
1. Using commodity hardware
2. Reasonably scalable
3. Preferably mobile workloads, software should be a bit more flexible than me moving disks and data
4. Replication is overkill for most data
5. Preferably dynamically configured network
1. BGP with OpenWRT router
2. Dynamically allocated host subnets
3. Load-balancing (MetalLB?), ECMP on router
4. Static IP configurations on nodes
6. IPv6 native, IPv4 accessible
1. IPv6 has whole block routed to us which gives us control over address routing and usage
2. Which allows us to expose services directly to the internet without complex router config
3. Which allows us to use eg. ExternalDNS to autoconfigure domain names for LB
4. But majority of the world still runs IPv4, which should be supported for public services
5. Exposing IPv4 service may require additional reconfiguration of router, port forwarding, manual domain setting or controller doing this some day in future
6. One public IPv4 address means probably extensive use of rule-based ingress controllers
7. IPv6 internet from pods should not be NATed
8. IPv4 internet from pods should be NATed by router
| Logo | Name | Description |
|------|------|-------------|
| <img src="docs/assets/talos.svg" alt="Talos Linux" height="50" width="50"> | Talos Linux | Kubernetes distribution and operating system for cluster nodes |
| <img src="docs/assets/mikrotik.svg" alt="MikroTik RouterOS" height="50" width="50"> | MikroTik RouterOS | Router operating system for MikroTik devices |
### Current implementation idea
### Configuration management
1. Cluster server nodes running Talos
2. OpenWRT router
1. VLAN / virtual interface, for cluster
2. Configuring using Ansible
3. Peering with cluster using BGP
4. Load-balancing using ECMP
3. Cluster networking
1. Cilium CNI
2. Native routing, no encapsulation or overlay
3. Using Cilium's network policies for firewall needs
4. IPv6 address pool
1. Nodes: 2001:470:61a3:100::/64
2. Pods: 2001:470:61a3:200::/64
3. Services: 2001:470:61a3:300::/112
4. Load balancer: 2001:470:61a3:400::/112
5. IPv4 address pool
1. Nodes: 192.168.1.32/27
2. Pods: 10.42.0.0/16
3. Services: 10.43.0.0/16
4. Load balancer: 10.44.0.0/16
4. Storage
1. OS is installed on dedicated disk
2. Mayastor managing all data disks
1. DiskPool for each data disk in cluster, labelled by type SSD or HDD
2. Creating StorageClass for each topology need (type, whether to replicate, on which node etc.)
| Logo | Name | Description |
|------|------|-------------|
| <img src="docs/assets/flux.svg" alt="Flux CD" height="50" width="50"> | Flux CD | GitOps operator for reconciling cluster state with Git repository |
| <img src="docs/assets/ansible.svg" alt="Ansible" height="50" width="50"> | Ansible | Configuration management and automation tool |
| | Vault Secrets Operator | Kubernetes operator for syncing secrets from OpenBao/Vault to Kubernetes |
## Working with repo
### Networking
Repo is preconfigured to use with nix and vscode
| Logo | Name | Description |
|------|------|-------------|
| <img src="docs/assets/cilium.svg" alt="Cilium" height="50" width="50"> | Cilium | CNI, BGP control plane, kube-proxy replacement and Load Balancer for cluster networking |
| <img src="docs/assets/nginx.svg" alt="Nginx" height="50" width="50"> | Nginx Ingress Controller | Ingress controller for routing external traffic to services in the cluster |
| <img src="docs/assets/cert-manager.svg" alt="cert-manager" height="50" width="50"> | cert-manager | Automatic TLS certificate management |
Install nix, vscode should pick up settings and launch terminals in `nix develop` with all needed utils.
### Storage
## Bootstrapping cluster
| Logo | Name | Description |
|------|------|-------------|
| <img src="docs/assets/openebs.svg" alt="OpenEBS" height="50" width="50"> | OpenEBS LVM LocalPV | Container Storage Interface for managing persistent volumes on local LVM pools |
| <img src="docs/assets/openbao.svg" alt="OpenBao" height="50" width="50"> | OpenBao | Secret storage (HashiCorp Vault compatible) |
| <img src="docs/assets/cloudnativepg.svg" alt="CloudNativePG" height="50" width="50"> | CloudNativePG | PostgreSQL operator for managing PostgreSQL instances |
1. Configure OpenWRT, create dedicated interface for connecting server
1. Set up node subnet, routing
2. Create static host entry `kube-api.homelab.lumpiasty.xyz` pointing at ipv6 of first node
2. Connect server
3. Grab Talos ISO, dd it to usb stick
4. Boot it and using keyboard set up static ip ipv6 subnet, should become reachable from pc
5. `talosctl gen config homelab https://kube-api.homelab.lumpiasty.xyz:6443`
6. Generate secrets `talosctl gen secrets`, **backup, keep `secrets.yml` safe**
7. Generate config files `make gen-talos-config`
8. Apply config to first node `talosctl apply-config --insecure -n 2001:470:61a3:100::2 -f controlplane.yml`
9. Wait for reboot then `talosctl bootstrap --talosconfig=talosconfig -n 2001:470:61a3:100::2`
10. Set up router and CNI
### Development tools
## Updating Talos config
| Logo | Name | Description |
|------|------|-------------|
| <img src="docs/assets/devenv.svg" alt="devenv" height="50" width="50"> | devenv | Tool for declarative management of development environment using Nix |
| <img src="docs/assets/renovate.svg" alt="Renovate" height="50" width="50"> | Renovate | Bot for keeping dependencies up to date |
| <img src="docs/assets/woodpecker.svg" alt="Woodpecker" height="50" width="50"> | Woodpecker CI | Continuous Integration system |
Update patches and re-generate and apply configs.
### AI infrastructure
| Logo | Name | Address | Description |
|------|------|---------|-------------|
| <img src="docs/assets/llama-cpp.svg" alt="LLaMA.cpp" height="50" width="50"> | LLaMA.cpp | https://llama.lumpiasty.xyz/ | LLM inference server running local models with GPU acceleration |
### Applications/Services
| Logo | Name | Address | Description |
|------|------|---------|-------------|
| <img src="docs/assets/gitea.svg" alt="Gitea" height="50" width="50"> | Gitea | https://gitea.lumpiasty.xyz/ | Private Git repository hosting and artifact storage (Docker, Helm charts) |
| <img src="docs/assets/open-webui.png" alt="Open WebUI" height="50" width="50"> | Open WebUI | https://openwebui.lumpiasty.xyz/ | Web UI for chatting with LLMs running on the cluster |
| <img src="docs/assets/teamspeak.svg" alt="iSpeak3" height="50" width="50"> | iSpeak3.pl | [ts3server://ispeak3.pl](ts3server://ispeak3.pl) | Public TeamSpeak 3 voice communication server |
| <img src="docs/assets/immich.svg" alt="Immich" height="50" width="50"> | Immich | https://immich.lumpiasty.xyz/ | Self-hosted photo and video backup and streaming service |
| <img src="docs/assets/frigate.svg" alt="Frigate" height="50" width="50"> | Frigate | https://frigate.lumpiasty.xyz/ | NVR for camera system with AI object detection and classification |
## Development
This repo leverages [devenv](https://devenv.sh/) for easy setup of a development environment. Install devenv, clone this repo and run `devenv shell` to make the tools and environment variables available in your shell. Alternatively, you can use direnv to automate enabling the environment after entering the directory in your shell. You can also install the [direnv extension](https://marketplace.visualstudio.com/items?itemName=mkhl.direnv) in VSCode to automatically set up the environment after opening the workspace so all the fancy intellisense and extensions detect stuff correctly.
### App deployment
This repo is being watched by Flux running on cluster. To change config/add new app, simply commit to this repo and wait a while for cluster to reconcile changes. You can speed up this process by "notifying" Flux using `flux reconcile source git flux-system`.
Flux watches 3 kustomizations in this repo:
- flux-system - [cluster/flux-system](cluster/flux-system) directory, contains flux manifests
- infra - [infra](infra) directory, contains cluster infrastructure manifests like storage classes, network policies, monitoring etc.
- apps - [apps](apps) directory, contains manifests for applications deployed on cluster
### Talos config changes
Talos config in this repo is stored as yaml patches under [talos/patches](talos/patches) directory. Those patches can then be compiled into full Talos config files using `make gen-talos-config` command. Full config can then be applied to cluster using `make apply-talos-config` command, which applies config to all nodes in cluster.
To compile config, you need to have secrets file, which contains certificates and keys for cluster. Those secrets are then incorporated into final config files. That is also why we can not store full config in repo.
### Router config changes
Router config is stored as an Ansible playbook under the `ansible/` directory. To apply changes to the router, run the `ansible-playbook playbooks/routeros.yml` command in the `ansible/` directory. Before running the playbook, you can check what changes will be applied to the router using the `--check` flag to the `ansible-playbook` command, which will run the playbook in "check mode" and show you the changes that would be applied without actually applying them. This is useful for verifying that your changes are correct before applying them to the router.
To run Ansible playbook, you need to have required Ansible collections installed. You can install them using `ansible-galaxy collection install -r ansible/requirements.yml` command. Configuring this in devenv is yet to be done, so you might need to install collections manually for now.
Secrets needed to access the router API are stored in OpenBao and loaded on demand when running playbook so you need to have access to appropriate secrets.
### Kube API access
To generate a kubeconfig for accessing the cluster API, run the `make get-kubeconfig` command, which will generate a kubeconfig under the `talos/generated/kubeconfig` path. Devenv automatically sets the `KUBECONFIG` environment variable to point to this file, so you can start using `kubectl` right away.
Like above, you need secrets file to generate kubeconfig.
<!-- TODO: Add instructions for setting up Router -->
```
make gen-talos-config
make apply-talos-config
```

View File

@@ -1,20 +0,0 @@
## RouterOS Ansible
This directory contains the new Ansible automation for the MikroTik router.
- Transport: RouterOS API (`community.routeros` collection), not SSH CLI scraping.
- Layout: one playbook (`playbooks/routeros.yml`) importing domain task files from `tasks/`.
- Goal: idempotent convergence using `community.routeros.api_modify` for managed paths.
### Quick start
1. Install dependencies:
- `ansible-galaxy collection install -r ansible/requirements.yml`
- `python -m pip install librouteros hvac`
2. Configure secret references in `ansible/vars/routeros-secrets.yml`.
3. Store required fields in OpenBao under configured KV path.
4. Export token (`OPENBAO_TOKEN` or `VAULT_TOKEN`).
5. Run:
- `ANSIBLE_CONFIG=ansible/ansible.cfg ansible-playbook ansible/playbooks/routeros.yml`
More details and design rationale: `docs/ansible/routeros-design.md`.

View File

@@ -1,5 +0,0 @@
# Ansible defaults for this repository's playbooks.
[defaults]
inventory = inventory/hosts.yml
# NOTE(review): the RouterOS playbook runs with connection: local over the
# device API, so SSH host-key checking is presumably irrelevant here.
host_key_checking = False
retry_files_enabled = False
result_format = yaml

2
ansible/hosts Normal file
View File

@@ -0,0 +1,2 @@
# Ansible inventory for ansible/playbook.yml: the router, addressed over IPv6.
[openwrt]
# "-O" presumably forces scp's legacy SCP protocol instead of SFTP — TODO confirm
2001:470:61a3:100:ffff:ffff:ffff:ffff ansible_scp_extra_args="-O"

View File

@@ -1,6 +0,0 @@
# YAML inventory for the RouterOS playbook: a single MikroTik CRS418 host,
# reached at its management-network address (192.168.255.0/24 subnet).
# NOTE(review): indentation appears stripped by the diff rendering.
all:
children:
mikrotik:
hosts:
crs418:
ansible_host: 192.168.255.10

6
ansible/playbook.yml Normal file
View File

@@ -0,0 +1,6 @@
# Play: configure the OpenWRT router over SSH as root.
# NOTE(review): indentation appears stripped by the diff rendering.
- name: Configure router
hosts: openwrt
remote_user: root
roles:
# presumably a community role for managing OpenWRT (UCI etc.) — TODO confirm
- ansible-openwrt
# local role carrying the router configuration (e.g. the bird2 BGP setup)
- router

View File

@@ -1,92 +0,0 @@
---
# Converge the MikroTik router to the state described by the task files under
# ../tasks/. Runs locally and talks to the RouterOS API; all credentials are
# pulled from OpenBao at runtime, nothing sensitive lives in the repo.
# NOTE(review): indentation appears stripped by the diff rendering; lines
# are kept exactly as shown.
- name: Converge MikroTik RouterOS config
hosts: mikrotik
gather_facts: false
# connection: local — modules call the RouterOS API, no SSH to the device.
connection: local
vars_files:
- ../vars/routeros-secrets.yml
pre_tasks:
# Resolve every secret reference (KV v2 path + key, declared in
# routeros-secrets.yml) into plain facts used by module_defaults and tasks.
- name: Load router secrets from OpenBao
ansible.builtin.set_fact:
routeros_api_username: >-
{{
lookup(
'community.hashi_vault.vault_kv2_get',
openbao_fields.routeros_api.path,
engine_mount_point=openbao_kv_mount
).secret[openbao_fields.routeros_api.username_key]
}}
routeros_api_password: >-
{{
lookup(
'community.hashi_vault.vault_kv2_get',
openbao_fields.routeros_api.path,
engine_mount_point=openbao_kv_mount
).secret[openbao_fields.routeros_api.password_key]
}}
routeros_pppoe_username: >-
{{
lookup(
'community.hashi_vault.vault_kv2_get',
openbao_fields.wan_pppoe.path,
engine_mount_point=openbao_kv_mount
).secret[openbao_fields.wan_pppoe.username_key]
}}
routeros_pppoe_password: >-
{{
lookup(
'community.hashi_vault.vault_kv2_get',
openbao_fields.wan_pppoe.path,
engine_mount_point=openbao_kv_mount
).secret[openbao_fields.wan_pppoe.password_key]
}}
routeros_tailscale_container_password: >-
{{
lookup(
'community.hashi_vault.vault_kv2_get',
openbao_fields.routeros_tailscale_container.path,
engine_mount_point=openbao_kv_mount
).secret[openbao_fields.routeros_tailscale_container.container_password_key]
}}
# Keep secret values out of logs and callback output.
no_log: true
# Shared connection settings for every community.routeros API module below.
module_defaults:
group/community.routeros.api:
hostname: "{{ ansible_host }}"
username: "{{ routeros_api_username }}"
password: "{{ routeros_api_password }}"
# NOTE(review): TLS is enabled but certificate validation is fully
# disabled — acceptable only on the trusted management network.
tls: true
validate_certs: false
validate_cert_hostname: false
force_no_cert: true
encoding: UTF-8
tasks:
# Domain task files, applied in dependency order.
- name: Preflight checks
ansible.builtin.import_tasks: ../tasks/preflight.yml
- name: Base network configuration
ansible.builtin.import_tasks: ../tasks/base.yml
- name: WAN and tunnel interfaces
ansible.builtin.import_tasks: ../tasks/wan.yml
- name: Hardware and platform tuning
ansible.builtin.import_tasks: ../tasks/hardware.yml
- name: RouterOS container configuration
ansible.builtin.import_tasks: ../tasks/containers.yml
- name: Addressing configuration
ansible.builtin.import_tasks: ../tasks/addressing.yml
- name: Firewall configuration
ansible.builtin.import_tasks: ../tasks/firewall.yml
- name: Routing configuration
ansible.builtin.import_tasks: ../tasks/routing.yml
- name: System configuration
ansible.builtin.import_tasks: ../tasks/system.yml

View File

@@ -1,5 +0,0 @@
# Ansible collections required by the RouterOS playbook:
# community.routeros provides api_modify / api_find_and_modify,
# community.hashi_vault provides the vault_kv2_get secret lookup.
collections:
- name: community.routeros
version: ">=3.16.0"
- name: community.hashi_vault
version: ">=7.1.0"

View File

@@ -0,0 +1,53 @@
# BIRD 2 configuration for the router: accepts passive BGP sessions from any
# Kubernetes node in 2001:470:61a3:100::/64 (same AS 65000) and installs the
# learned pod/service/load-balancer routes into the kernel tables.
# Would never work without this awesome blogpost
# https://farcaller.net/2024/making-cilium-bgp-work-with-ipv6/
log "/tmp/bird.log" all;
log syslog all;
#Router ID
router id 192.168.1.1;
# Push learned IPv4 routes into the kernel; merge paths enables ECMP.
protocol kernel kernel4 {
learn;
scan time 10;
merge paths yes;
ipv4 {
import none;
export all;
};
}
# Same for IPv6 routes.
protocol kernel kernel6 {
learn;
scan time 10;
merge paths yes;
ipv6 {
import none;
export all;
};
}
protocol device {
scan time 10;
}
protocol direct {
interface "*";
}
# iBGP with the cluster: passive (nodes initiate), neighbors matched by
# prefix so nodes can come and go without config changes.
protocol bgp homelab {
debug { events };
passive;
direct;
local 2001:470:61a3:100:ffff:ffff:ffff:ffff as 65000;
neighbor range 2001:470:61a3:100::/64 as 65000;
ipv4 {
# Extended next hop: carry IPv4 routes over the IPv6-only session.
extended next hop yes;
import all;
export all;
};
ipv6 {
import all;
export all;
};
}

View File

@@ -0,0 +1,5 @@
# Handler: restart BIRD so a freshly copied /etc/bird.conf takes effect and
# make sure the service is enabled at boot. Triggered via `notify: Reload bird`.
# Uses the FQCN ansible.builtin.service for consistency with the sibling task
# file (ansible.builtin.copy); bare `service` resolves to the same module.
- name: Reload bird
  ansible.builtin.service:
    name: bird
    state: restarted
    enabled: true

View File

@@ -0,0 +1,16 @@
---
# Install the BIRD 2 BGP daemon on the OpenWRT router and deploy its config.
# NOTE(review): indentation appears stripped by the diff rendering.
- name: Install bird2
opkg:
name: "{{ item }}"
state: present
# Workaround for opkg module not handling multiple names at once well
loop:
- bird2
- bird2c
# Deploy the role's bird.conf; a change triggers the "Reload bird" handler.
- name: Set up bird.conf
ansible.builtin.copy:
src: bird.conf
dest: /etc/bird.conf
mode: "644"
notify: Reload bird

View File

@@ -1,48 +0,0 @@
---
- name: Configure IPv4 addresses
community.routeros.api_modify:
path: ip address
data:
- address: 172.17.0.1/16
interface: dockers
network: 172.17.0.0
- address: 192.168.4.1/24
interface: lo
network: 192.168.4.0
- address: 192.168.100.20/24
interface: sfp-sfpplus1
network: 192.168.100.0
- address: 192.168.255.10/24
interface: bridge1
network: 192.168.255.0
- address: 192.168.0.1/24
interface: vlan2
network: 192.168.0.0
- address: 192.168.1.1/24
interface: vlan4
network: 192.168.1.0
- address: 192.168.3.1/24
interface: vlan3
network: 192.168.3.0
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure IPv6 addresses
community.routeros.api_modify:
path: ipv6 address
data:
- address: 2001:470:70:dd::2/64
advertise: false
interface: sit1
- address: ::ffff:ffff:ffff:ffff/64
from-pool: pool1
interface: vlan2
- address: 2001:470:61a3:500:ffff:ffff:ffff:ffff/64
interface: dockers
- address: 2001:470:61a3:100::1/64
advertise: false
interface: vlan4
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true

View File

@@ -1,226 +0,0 @@
---
- name: Configure bridges
community.routeros.api_modify:
path: interface bridge
data:
- name: bridge1
vlan-filtering: true
- name: dockers
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure VLAN interfaces
community.routeros.api_modify:
path: interface vlan
data:
- name: vlan2
comment: LAN (PC, WIFI)
interface: bridge1
vlan-id: 2
- name: vlan3
comment: KAMERY
interface: bridge1
vlan-id: 3
- name: vlan4
comment: SERVER LAN
interface: bridge1
vlan-id: 4
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure interface lists
community.routeros.api_modify:
path: interface list
data:
- name: wan
comment: contains interfaces facing internet
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure interface list members
community.routeros.api_modify:
path: interface list member
data:
- interface: pppoe-gpon
list: wan
- interface: lte1
list: wan
- interface: sit1
list: wan
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure bridge ports
community.routeros.api_modify:
path: interface bridge port
data:
- bridge: dockers
interface: veth1
comment: Tailscale container interface
- bridge: bridge1
interface: ether1
pvid: 2
- bridge: bridge1
interface: ether2
pvid: 2
- bridge: bridge1
interface: ether8
pvid: 4
- bridge: bridge1
interface: ether9
pvid: 2
- bridge: bridge1
interface: ether10
pvid: 3
- bridge: bridge1
interface: sfp-sfpplus2
- bridge: bridge1
interface: ether11
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure bridge VLAN membership
community.routeros.api_modify:
path: interface bridge vlan
data:
- bridge: bridge1
tagged: sfp-sfpplus2
untagged: ether1,ether2,ether9
vlan-ids: 2
- bridge: bridge1
tagged: sfp-sfpplus2
untagged: ether10
vlan-ids: 3
- bridge: bridge1
untagged: ether8
vlan-ids: 4
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure IPv4 pools
community.routeros.api_modify:
path: ip pool
data:
- name: dhcp_pool0
ranges: 192.168.0.50-192.168.0.250
comment: LAN DHCP pool
- name: dhcp_pool1
ranges: 192.168.255.1-192.168.255.9,192.168.255.11-192.168.255.254
comment: MGMT DHCP pool
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure DHCP servers
community.routeros.api_modify:
path: ip dhcp-server
data:
- name: dhcp1
address-pool: dhcp_pool0
interface: vlan2
lease-time: 30m
comment: LAN
- name: dhcp2
address-pool: dhcp_pool1
interface: bridge1
lease-time: 30m
comment: MGMT
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure DHCP networks
community.routeros.api_modify:
path: ip dhcp-server network
data:
- address: 192.168.0.0/24
dns-server: 192.168.0.1
gateway: 192.168.0.1
- address: 192.168.255.0/24
dns-none: true
gateway: 192.168.255.10
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
# TODO: IPv6 pools are useful when we have dynamic prefix, but we don't
# We can remove it now
- name: Configure IPv6 pools
community.routeros.api_modify:
path: ipv6 pool
data:
- name: pool1
prefix: 2001:470:61a3::/48
prefix-length: 64
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure DNS
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: ip dns
find: {}
values:
allow-remote-requests: true
cache-size: 20480
servers: 1.1.1.1,1.0.0.1,2606:4700:4700::1111,2606:4700:4700::1001
- name: Configure NAT-PMP global settings
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: ip nat-pmp
find: {}
values:
enabled: true
- name: Configure NAT-PMP interfaces
community.routeros.api_modify:
path: ip nat-pmp interfaces
data:
- interface: dockers
type: internal
- interface: pppoe-gpon
type: external
- interface: vlan2
type: internal
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure UPnP global settings
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: ip upnp
find: {}
values:
enabled: true
- name: Configure UPnP interfaces
community.routeros.api_modify:
path: ip upnp interfaces
data:
- interface: dockers
type: internal
- interface: pppoe-gpon
type: external
- interface: vlan2
type: internal
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure IPv6 ND defaults
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: ipv6 nd
find:
default: true
values:
advertise-dns: true

View File

@@ -1,66 +0,0 @@
---
- name: Configure container runtime defaults
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: container config
find: {}
values:
registry-url: https://ghcr.io
tmpdir: /tmp1/pull
- name: Configure container env lists
community.routeros.api_modify:
path: container envs
data:
- key: ADVERTISE_ROUTES
list: tailscale
value: 192.168.0.0/24,192.168.1.0/24,192.168.4.1/32,192.168.100.1/32,192.168.255.0/24,10.42.0.0/16,10.43.0.0/16,10.44.0.0/16,2001:470:61a3::/48
- key: CONTAINER_GATEWAY
list: tailscale
value: 172.17.0.1
- key: PASSWORD
list: tailscale
value: "{{ routeros_tailscale_container_password }}"
- key: TAILSCALE_ARGS
list: tailscale
value: --accept-routes --advertise-exit-node --snat-subnet-routes=false
- key: UPDATE_TAILSCALE
list: tailscale
value: y
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure container mounts
community.routeros.api_modify:
path: container mounts
data:
- dst: /var/lib/tailscale
list: tailscale
src: /usb1/tailscale
- dst: /root
list: tailscale-root
src: /tmp1/tailscale-root
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure tailscale container
community.routeros.api_modify:
path: container
data:
- dns: 172.17.0.1
envlists: tailscale
hostname: mikrotik
interface: veth1
layer-dir: ""
mountlists: tailscale
name: tailscale-mikrotik:latest
remote-image: fluent-networks/tailscale-mikrotik:latest
root-dir: /usb1/containers/tailscale
start-on-boot: true
tmpfs: /tmp:67108864:01777
workdir: /
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true

View File

@@ -1,480 +0,0 @@
---
- name: Configure IPv4 firewall filter rules
community.routeros.api_modify:
path: ip firewall filter
data:
- action: fasttrack-connection
chain: forward
connection-state: established,related
- action: accept
chain: forward
comment: Allow all already established connections
connection-state: established,related
- action: accept
chain: forward
comment: Allow LTE modem management (next rule forbids it otherwise)
dst-address: 192.168.8.1
out-interface: lte1
- action: reject
chain: forward
comment: Forbid forwarding 192.168.0.0/16 to WAN
dst-address: 192.168.0.0/16
out-interface-list: wan
reject-with: icmp-network-unreachable
- action: reject
chain: forward
comment: Forbid forwarding 10.0.0.0/8 to WAN
dst-address: 10.0.0.0/8
out-interface-list: wan
reject-with: icmp-network-unreachable
- action: reject
chain: forward
comment: Forbid forwarding 172.16.0.0/12 to WAN
dst-address: 172.16.0.0/12
out-interface-list: wan
reject-with: icmp-network-unreachable
- action: reject
chain: forward
comment: Forbid forwarding 100.64.0.0/10 to WAN
dst-address: 100.64.0.0/10
out-interface-list: wan
reject-with: icmp-network-unreachable
- action: accept
chain: forward
comment: Allow from LAN to everywhere
in-interface: vlan2
- action: accept
chain: forward
comment: Allow from SRV to internet
in-interface: vlan4
out-interface-list: wan
- action: accept
chain: forward
comment: Allow from SRV to CAM
in-interface: vlan4
out-interface: vlan3
- action: accept
chain: forward
comment: Allow from dockers to everywhere
in-interface: dockers
- action: jump
chain: forward
comment: Allow port forwards
in-interface: pppoe-gpon
jump-target: allow-ports
- action: reject
chain: forward
comment: Reject all remaining (port unreachable from WAN)
in-interface-list: wan
log-prefix: FORWARD REJECT
reject-with: icmp-port-unreachable
- action: reject
chain: forward
comment: Reject all remaining (net prohibited from LAN)
log-prefix: FORWARD REJECT
reject-with: icmp-net-prohibited
- action: accept
chain: input
comment: Allow all already established connections
connection-state: established,related
- action: accept
chain: input
comment: Allow HE tunnel
in-interface: pppoe-gpon
protocol: ipv6-encap
- action: accept
chain: input
comment: Allow ICMP
protocol: icmp
- action: accept
chain: input
comment: Allow Winbox
dst-port: 8291
log: true
protocol: tcp
- action: accept
chain: input
comment: Allow SSH Mikrotik
dst-port: 2137
log: true
protocol: tcp
- action: accept
chain: input
comment: Allow RouterOS API-SSL from MGMT
dst-port: 8729
protocol: tcp
- action: accept
chain: input
comment: Allow DNS from LAN
dst-port: 53
in-interface: vlan2
protocol: udp
- action: accept
chain: input
dst-port: 53
in-interface: vlan2
protocol: tcp
- action: accept
chain: input
comment: Allow DNS from SRV
dst-port: 53
in-interface: vlan4
protocol: udp
- action: accept
chain: input
dst-port: 53
in-interface: vlan4
protocol: tcp
- action: accept
chain: input
comment: Allow DNS from dockers
dst-port: 53
in-interface: dockers
protocol: udp
- action: accept
chain: input
dst-port: 53
in-interface: dockers
protocol: tcp
- action: accept
chain: input
comment: Allow BGP from SRV
dst-port: 179
in-interface: vlan4
protocol: udp
- action: accept
chain: input
comment: NAT-PMP from LAN
dst-port: 5351
in-interface: vlan2
protocol: udp
- action: accept
chain: input
comment: NAT-PMP from dockers (for tailscale)
dst-port: 5351
in-interface: dockers
protocol: udp
- action: reject
chain: input
comment: Reject all remaining
log-prefix: INPUT REJECT
reject-with: icmp-port-unreachable
- action: accept
chain: allow-ports
comment: Allow TS3
dst-port: 9987
out-interface: vlan4
protocol: udp
- action: accept
chain: allow-ports
dst-port: 30033
out-interface: vlan4
protocol: tcp
- action: accept
chain: allow-ports
comment: Allow HTTP
dst-port: 80
out-interface: vlan4
protocol: tcp
- action: accept
chain: allow-ports
comment: Allow HTTPS
dst-port: 443
out-interface: vlan4
protocol: tcp
- action: accept
chain: allow-ports
comment: Allow SSH Gitea
dst-port: 22
out-interface: vlan4
protocol: tcp
- action: accept
chain: allow-ports
comment: Allow anything udp to Tailscale
dst-address: 172.17.0.2
out-interface: dockers
protocol: udp
- action: accept
chain: allow-ports
comment: Allow anything from GPON to LAN (NAT-PMP)
dst-address: 192.168.0.0/24
in-interface: pppoe-gpon
out-interface: vlan2
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure IPv4 NAT rules
community.routeros.api_modify:
path: ip firewall nat
data:
- action: masquerade
chain: srcnat
comment: Masquerade to internet
out-interface-list: wan
- action: masquerade
chain: srcnat
comment: GPON ONT management
dst-address: 192.168.100.1
- action: masquerade
chain: srcnat
comment: LTE Modem management
dst-address: 192.168.8.1
- action: dst-nat
chain: dstnat
comment: TS3
dst-address: 139.28.40.212
dst-port: 9987
protocol: udp
to-addresses: 10.44.0.0
- action: dst-nat
chain: dstnat
dst-address: 139.28.40.212
dst-port: 30033
protocol: tcp
to-addresses: 10.44.0.0
- action: src-nat
chain: srcnat
comment: src-nat from LAN to TS3 to some Greenland address
dst-address: 10.44.0.0
dst-port: 9987
in-interface: '!pppoe-gpon'
protocol: udp
to-addresses: 128.0.70.5
- action: src-nat
chain: srcnat
dst-address: 10.44.0.0
dst-port: 30033
in-interface: '!pppoe-gpon'
protocol: tcp
to-addresses: 128.0.70.5
- action: dst-nat
chain: dstnat
comment: HTTPS
dst-address: 139.28.40.212
dst-port: 443
protocol: tcp
to-addresses: 10.44.0.6
- action: dst-nat
chain: dstnat
comment: HTTP
dst-address: 139.28.40.212
dst-port: 80
protocol: tcp
to-addresses: 10.44.0.6
- action: dst-nat
chain: dstnat
comment: SSH Gitea
dst-address: 139.28.40.212
dst-port: 22
protocol: tcp
to-addresses: 10.44.0.6
- action: dst-nat
chain: dstnat
comment: sunshine
dst-address: 139.28.40.212
dst-port: 47984
in-interface: pppoe-gpon
protocol: tcp
to-addresses: 192.168.0.67
- action: dst-nat
chain: dstnat
comment: sunshine
dst-address: 139.28.40.212
dst-port: 47989
in-interface: pppoe-gpon
protocol: tcp
to-addresses: 192.168.0.67
- action: dst-nat
chain: dstnat
comment: sunshine
dst-address: 139.28.40.212
dst-port: 48010
in-interface: pppoe-gpon
protocol: tcp
to-addresses: 192.168.0.67
- action: dst-nat
chain: dstnat
comment: sunshine
dst-address: 139.28.40.212
dst-port: 48010
in-interface: pppoe-gpon
protocol: udp
to-addresses: 192.168.0.67
- action: dst-nat
chain: dstnat
comment: sunshine
dst-address: 139.28.40.212
dst-port: 47998-48000
in-interface: pppoe-gpon
protocol: udp
to-addresses: 192.168.0.67
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure IPv6 firewall filter rules
community.routeros.api_modify:
path: ipv6 firewall filter
data:
- action: fasttrack-connection
chain: forward
connection-state: established,related
- action: accept
chain: forward
comment: Allow all already established connections
connection-state: established,related
- action: reject
chain: forward
comment: Forbid forwarding routed /48 from tunnelbroker to WAN
dst-address: 2001:470:61a3::/48
out-interface-list: wan
reject-with: icmp-no-route
- action: reject
chain: forward
comment: Forbid forwarding routed /64 from tunnelbroker to WAN
dst-address: 2001:470:71:dd::/64
out-interface-list: wan
reject-with: icmp-no-route
- action: accept
chain: forward
comment: Allow from LAN to everywhere
in-interface: vlan2
- action: accept
chain: forward
comment: Allow ICMPv6 from internet to LAN
in-interface-list: wan
out-interface: vlan2
protocol: icmpv6
- action: accept
chain: forward
comment: Allow from SRV to internet
in-interface: vlan4
out-interface-list: wan
- action: accept
chain: forward
comment: Allow from internet to SRV nodes
dst-address: 2001:470:61a3:100::/64
in-interface-list: wan
out-interface: vlan4
- action: accept
chain: forward
comment: Allow from internet to homelab LB
dst-address: 2001:470:61a3:400::/112
in-interface-list: wan
out-interface: vlan4
- action: accept
chain: forward
comment: Allow from SRV to CAM
in-interface: vlan4
out-interface: vlan3
- action: accept
chain: forward
comment: Allow from dockers to everywhere
in-interface: dockers
- action: accept
chain: forward
comment: Allow from internet to dockers
dst-address: 2001:470:61a3:500::/64
in-interface-list: wan
out-interface: dockers
- action: accept
chain: forward
comment: Allow tcp transmission port to LAN
dst-port: 51413
out-interface: vlan2
protocol: tcp
- action: accept
chain: forward
comment: Allow udp transmission port to LAN
dst-port: 51413
out-interface: vlan2
protocol: udp
- action: reject
chain: forward
comment: Reject all remaining
reject-with: icmp-no-route
- action: accept
chain: input
comment: Allow all already established connections
connection-state: established,related
- action: accept
chain: input
comment: Allow ICMPv6
protocol: icmpv6
- action: accept
chain: input
comment: Allow Winbox
dst-port: 8291
protocol: tcp
- action: accept
chain: input
comment: Allow SSH Mikrotik
dst-port: 2137
protocol: tcp
- action: accept
chain: input
comment: Allow DNS from LAN
dst-port: 53
in-interface: vlan2
protocol: udp
- action: accept
chain: input
dst-port: 53
in-interface: vlan2
protocol: tcp
- action: accept
chain: input
comment: Allow DNS from SRV
dst-port: 53
in-interface: vlan4
protocol: udp
- action: accept
chain: input
dst-port: 53
in-interface: vlan4
protocol: tcp
- action: accept
chain: input
comment: Allow DNS from dockers
dst-port: 53
in-interface: dockers
protocol: udp
- action: accept
chain: input
dst-port: 53
in-interface: dockers
protocol: tcp
- action: accept
chain: input
comment: Allow BGP from SRV
dst-port: 179
in-interface: vlan4
protocol: tcp
src-address: 2001:470:61a3:100::/64
- action: reject
chain: input
comment: Reject all remaining
reject-with: icmp-admin-prohibited
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure IPv6 NAT rules
community.routeros.api_modify:
path: ipv6 firewall nat
data:
- action: src-nat
chain: srcnat
comment: src-nat tailnet to internet
out-interface-list: wan
src-address: fd7a:115c:a1e0::/48
to-address: 2001:470:61a3:600::/64
- action: masquerade
chain: srcnat
disabled: true
in-interface: vlan2
out-interface: vlan4
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true

View File

@@ -1,103 +0,0 @@
---
- name: Configure ethernet interface metadata and SFP options
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: interface ethernet
find:
default-name: "{{ item.default_name }}"
values: "{{ item.config }}"
loop:
- default_name: ether1
config:
comment: Mój pc
- default_name: ether2
config:
comment: Wifi środek
- default_name: ether8
config:
comment: Serwer
- default_name: ether9
config:
comment: Wifi góra
- default_name: ether10
config:
comment: Kamera na domu
- default_name: ether11
config:
comment: KVM serwer
- default_name: sfp-sfpplus1
config:
auto-negotiation: false
comment: GPON WAN
speed: 2.5G-baseX
- default_name: sfp-sfpplus2
config:
comment: GARAŻ
loop_control:
label: "{{ item.default_name }}"
- name: Configure LTE interface defaults
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: interface lte
find:
default-name: lte1
values:
apn-profiles: default-nodns
comment: Backup LTE WAN
- name: Configure LTE APN profiles
community.routeros.api_modify:
path: interface lte apn
data:
- add-default-route: false
apn: internet
comment: default but without dns and default route
ipv6-interface: lte1
name: default-nodns
use-network-apn: true
use-peer-dns: false
# Default APN — we can't really remove it yet, as I don't want to reconfigure it
- add-default-route: true
apn: internet
authentication: none
default-route-distance: 2
ip-type: auto
name: default
use-network-apn: true
use-peer-dns: true
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
- name: Configure temporary disk for containers
community.routeros.api_modify:
path: disk
data:
- slot: tmp1
type: tmpfs
# This is not ideal: there's no unique identifier for the USB disk,
# so after a reinstall it might be assigned to another slot.
# We just add a disk with slot usb1 and don't specify anything else
# so Ansible doesn't touch it
- slot: usb1
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
- name: Configure switch settings
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: interface ethernet switch
find:
.id: "0"
values:
qos-hw-offloading: true
# Enabling L3 offloading would cause all packets to skip firewall and NAT
l3-hw-offloading: false
- name: Configure neighbor discovery settings
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: ip neighbor discovery-settings
find: {}
values:
discover-interface-list: '!dynamic'

View File

@@ -1,46 +0,0 @@
---
- name: Verify API connectivity and fetch basic facts
community.routeros.api_facts:
gather_subset:
- default
- hardware
- name: Show target identity
ansible.builtin.debug:
msg: "Managing {{ ansible_host }} ({{ ansible_facts['net_model'] | default('unknown model') }})"
- name: Assert expected router model
ansible.builtin.assert:
that:
- ansible_facts['net_model'] is defined
- ansible_facts['net_model'] == "CRS418-8P-8G-2S+"
fail_msg: "Unexpected router model: {{ ansible_facts['net_model'] | default('unknown') }}"
success_msg: "Router model matches expected CRS418-8P-8G-2S+"
- name: Read RouterOS device-mode flags
community.routeros.api:
path: system/device-mode
register: routeros_device_mode
check_mode: false
changed_when: false
- name: Assert container feature is enabled in device mode
ansible.builtin.assert:
that:
- not (routeros_device_mode.skipped | default(false))
- (routeros_device_mode | to_nice_json | lower) is search('container[^a-z0-9]+(yes|true)')
fail_msg: "RouterOS device-mode does not report container as enabled. Payload: {{ routeros_device_mode | to_nice_json }}"
success_msg: "RouterOS device-mode confirms container=yes"
- name: Read configured disks
community.routeros.api_info:
path: disk
register: routeros_disks
check_mode: false
- name: Assert usb1 disk is present
ansible.builtin.assert:
that:
- (routeros_disks.result | selectattr('slot', 'equalto', 'usb1') | list | length) > 0
fail_msg: "Required disk slot usb1 is not present on router."
success_msg: "Required disk usb1 is present"

View File

@@ -1,99 +0,0 @@
---
- name: Configure IPv4 routes
community.routeros.api_modify:
path: ip route
data:
- comment: Tailnet
disabled: false
distance: 1
dst-address: 100.64.0.0/10
gateway: 172.17.0.2
routing-table: main
scope: 30
suppress-hw-offload: false
target-scope: 10
- disabled: false
distance: 1
dst-address: 0.0.0.0/0
gateway: pppoe-gpon
routing-table: main
scope: 30
suppress-hw-offload: false
target-scope: 10
vrf-interface: pppoe-gpon
- disabled: false
distance: 2
dst-address: 0.0.0.0/0
gateway: 192.168.8.1
routing-table: main
scope: 30
suppress-hw-offload: false
target-scope: 10
vrf-interface: lte1
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
- name: Configure IPv6 routes
community.routeros.api_modify:
path: ipv6 route
data:
- disabled: false
distance: 1
dst-address: 2000::/3
gateway: 2001:470:70:dd::1
scope: 30
target-scope: 10
- comment: Tailnet
disabled: false
dst-address: fd7a:115c:a1e0::/48
gateway: 2001:470:61a3:500::1
pref-src: ""
routing-table: main
suppress-hw-offload: false
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
- name: Configure BGP instance
community.routeros.api_modify:
path: routing bgp instance
data:
- name: bgp-homelab
as: 65000
disabled: false
router-id: 192.168.1.1
routing-table: main
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure BGP templates
community.routeros.api_modify:
path: routing bgp template
data:
- name: klaster
afi: ip,ipv6
as: 6500
disabled: false
# Default template
- name: default
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
- name: Configure BGP connections
community.routeros.api_modify:
path: routing bgp connection
data:
- name: bgp1
afi: ip,ipv6
as: 65000
connect: true
disabled: false
instance: bgp-homelab
listen: true
local.role: ibgp
remote.address: 2001:470:61a3:100::3/128
routing-table: main
templates: klaster
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true

View File

@@ -1,43 +0,0 @@
---
- name: Configure system clock
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: system clock
find: {}
values:
time-zone-name: Europe/Warsaw
- name: Configure dedicated Ansible management user
community.routeros.api_modify:
path: user
data:
- name: "{{ routeros_api_username }}"
group: full
password: "{{ routeros_api_password }}"
disabled: false
comment: "Ansible management user"
handle_absent_entries: ignore
handle_entries_content: remove_as_much_as_possible
- name: Configure service ports and service enablement
community.routeros.api_find_and_modify:
ignore_dynamic: false
path: ip service
find:
name: "{{ item.name }}"
values: "{{ item }}"
loop:
- name: ftp
disabled: true
- name: telnet
disabled: true
- name: www
disabled: true
- name: ssh
port: 2137
- name: api
disabled: true
- name: api-ssl
disabled: false
loop_control:
label: "{{ item.name }}"

View File

@@ -1,44 +0,0 @@
---
- name: Configure PPPoE client
community.routeros.api_modify:
path: interface pppoe-client
data:
- disabled: false
interface: sfp-sfpplus1
keepalive-timeout: 2
name: pppoe-gpon
password: "{{ routeros_pppoe_password }}"
use-peer-dns: true
user: "{{ routeros_pppoe_username }}"
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure 6to4 tunnel interface
community.routeros.api_modify:
path: interface 6to4
data:
- comment: Hurricane Electric IPv6 Tunnel Broker
local-address: 139.28.40.212
mtu: 1472
name: sit1
remote-address: 216.66.80.162
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true
- name: Configure veth interface for containers
community.routeros.api_modify:
path: interface veth
data:
- address: 172.17.0.2/16,2001:470:61a3:500::1/64
container-mac-address: 7E:7E:A1:B1:2A:7C
dhcp: false
gateway: 172.17.0.1
gateway6: 2001:470:61a3:500:ffff:ffff:ffff:ffff
mac-address: 7E:7E:A1:B1:2A:7B
name: veth1
comment: Tailscale container
handle_absent_entries: remove
handle_entries_content: remove_as_much_as_possible
ensure_order: true

View File

@@ -1,19 +0,0 @@
---
# Secret references only; actual values are loaded from OpenBao/Vault at runtime.
# KVv2 mount and secret path (full secret path is <mount>/data/<path>).
openbao_kv_mount: secret
# Field names expected in the OpenBao secret.
openbao_fields:
routeros_api:
path: routeros_api
username_key: username
password_key: password
wan_pppoe:
path: wan_pppoe
username_key: username
password_key: password
routeros_tailscale_container:
path: router_tailscale
container_password_key: container_password

View File

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- secret.yaml
- release.yaml

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: authentik

View File

@@ -1,23 +0,0 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: authentik-postgresql-cluster-lvmhdd
namespace: authentik
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:17.4
bootstrap:
initdb:
database: authentik
owner: authentik
storage:
pvcTemplate:
storageClassName: hdd-lvmpv
resources:
requests:
storage: 10Gi
volumeName: authentik-postgresql-cluster-lvmhdd-1

View File

@@ -1,33 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: authentik-postgresql-cluster-lvmhdd-1
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: authentik-postgresql-cluster-lvmhdd-1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: authentik-postgresql-cluster-lvmhdd-1
---
# PVCs are dynamically created by the Postgres operator

View File

@@ -1,61 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: authentik
namespace: authentik
spec:
interval: 24h
url: https://charts.goauthentik.io
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: authentik
namespace: authentik
spec:
interval: 30m
chart:
spec:
chart: authentik
version: 2026.2.1
sourceRef:
kind: HelmRepository
name: authentik
namespace: authentik
interval: 12h
values:
authentik:
postgresql:
host: authentik-postgresql-cluster-lvmhdd-rw
name: authentik
user: authentik
global:
env:
- name: AUTHENTIK_SECRET_KEY
valueFrom:
secretKeyRef:
name: authentik-secret
key: secret_key
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
name: authentik-postgresql-cluster-lvmhdd-app
key: password
postgresql:
enabled: false
server:
ingress:
enabled: true
ingressClassName: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
hosts:
- authentik.lumpiasty.xyz
tls:
- secretName: authentik-ingress
hosts:
- authentik.lumpiasty.xyz

View File

@@ -1,38 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: authentik-secret
namespace: authentik
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: authentik
namespace: authentik
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: authentik
serviceAccount: authentik-secret
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: authentik-secret
namespace: authentik
spec:
type: kv-v2
mount: secret
path: authentik
destination:
create: true
name: authentik-secret
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: authentik

View File

@@ -1,48 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: crawl4ai-proxy
namespace: crawl4ai
spec:
replicas: 1
selector:
matchLabels:
app: crawl4ai-proxy
template:
metadata:
labels:
app: crawl4ai-proxy
spec:
containers:
- name: crawl4ai-proxy
image: gitea.lumpiasty.xyz/lumpiasty/crawl4ai-proxy-fit:latest
imagePullPolicy: Always
env:
- name: LISTEN_PORT
value: "8000"
- name: CRAWL4AI_ENDPOINT
value: http://crawl4ai.crawl4ai.svc.cluster.local:11235/crawl
ports:
- name: http
containerPort: 8000
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: 3
periodSeconds: 10
timeoutSeconds: 2
failureThreshold: 6
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 10
periodSeconds: 15
timeoutSeconds: 2
failureThreshold: 6
resources:
requests:
cpu: 25m
memory: 32Mi
limits:
cpu: 200m
memory: 128Mi

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: crawl4ai-proxy
namespace: crawl4ai
spec:
type: ClusterIP
selector:
app: crawl4ai-proxy
ports:
- name: http
port: 8000
targetPort: 8000
protocol: TCP

View File

@@ -1,62 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: crawl4ai
namespace: crawl4ai
spec:
replicas: 1
selector:
matchLabels:
app: crawl4ai
template:
metadata:
labels:
app: crawl4ai
spec:
containers:
- name: crawl4ai
image: unclecode/crawl4ai:latest
imagePullPolicy: IfNotPresent
env:
- name: CRAWL4AI_API_TOKEN
valueFrom:
secretKeyRef:
name: crawl4ai-secret
key: api_token
optional: false
- name: MAX_CONCURRENT_TASKS
value: "5"
ports:
- name: http
containerPort: 11235
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: "2"
memory: 4Gi
volumeMounts:
- name: dshm
mountPath: /dev/shm
volumes:
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 1Gi

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: crawl4ai

View File

@@ -1,38 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: crawl4ai-secret
namespace: crawl4ai
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: crawl4ai
namespace: crawl4ai
spec:
method: kubernetes
mount: kubernetes
kubernetes:
role: crawl4ai
serviceAccount: crawl4ai-secret
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: crawl4ai-secret
namespace: crawl4ai
spec:
type: kv-v2
mount: secret
path: crawl4ai
destination:
create: true
name: crawl4ai-secret
type: Opaque
transformation:
excludeRaw: true
vaultAuthRef: crawl4ai

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: crawl4ai
namespace: crawl4ai
spec:
type: ClusterIP
selector:
app: crawl4ai
ports:
- name: http
port: 11235
targetPort: 11235
protocol: TCP

View File

@@ -1,49 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-config
namespace: openebs
spec:
capacity: 5Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-config
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-config
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-config
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
volumeName: frigate-config

View File

@@ -3,7 +3,5 @@ kind: Kustomization
resources:
- namespace.yaml
- secret.yaml
- config-pvc.yaml
- media-pvc.yaml
- release.yaml
- webrtc-svc.yaml

View File

@@ -1,49 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: frigate-media
namespace: openebs
spec:
capacity: 500Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: frigate-media
spec:
capacity:
storage: 500Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: frigate-media
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: frigate
name: frigate-media
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-media
namespace: frigate
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
volumeName: frigate-media

View File

@@ -36,8 +36,6 @@ spec:
cookie_secure: True
record:
expire_interval: 1440 # 24h
sync_recordings: True
enabled: True
retain:
days: 90

View File

@@ -1,46 +0,0 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-shared-storage-lvmhdd
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-shared-storage-lvmhdd
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: gitea-shared-storage-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-shared-storage-lvmhdd
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: hdd-lvmpv
volumeName: gitea-shared-storage-lvmhdd

View File

@@ -2,10 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- gitea-shared-volume.yaml
- valkey-volume.yaml
- release.yaml
- secret.yaml
- backups.yaml

View File

@@ -2,27 +2,11 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: gitea-postgresql-cluster-lvmhdd
name: gitea-postgresql-cluster
namespace: gitea
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:17.4
storage:
pvcTemplate:
storageClassName: hdd-lvmpv
resources:
requests:
storage: 20Gi
volumeName: gitea-postgresql-cluster-lvmhdd-1
# Just to avoid bootstrapping the instance again
# I migrated data manually using pv_migrate because this feature is broken
# when source and target volumes are in different storage classes
# CNPG just sets dataSource to the PVC and expects the underlying storage
# to handle the migration, but it doesn't work here
bootstrap:
recovery:
backup:
name: backup-migration
size: 10Gi
storageClass: mayastor-single-hdd

View File

@@ -1,33 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-postgresql-cluster-lvmhdd-1
namespace: openebs
spec:
capacity: 20Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-postgresql-cluster-lvmhdd-1
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: gitea-postgresql-cluster-lvmhdd-1
---
# PVCs are dynamically created by the Postgres operator

View File

@@ -17,7 +17,7 @@ spec:
chart:
spec:
chart: gitea
version: 12.5.0
version: 12.2.0
sourceRef:
kind: HelmRepository
name: gitea-charts
@@ -45,35 +45,31 @@ spec:
primary:
persistence:
enabled: true
existingClaim: gitea-valkey-primary-lvmhdd-0
storageClass: mayastor-single-hdd
resources:
requests:
cpu: 0
persistence:
enabled: true
# We'll create PV and PVC manually
create: false
claimName: gitea-shared-storage-lvmhdd
storageClass: mayastor-single-hdd
gitea:
additionalConfigFromEnvs:
- name: GITEA__DATABASE__PASSWD
valueFrom:
secretKeyRef:
name: gitea-postgresql-cluster-lvmhdd-app
name: gitea-postgresql-cluster-app
key: password
config:
database:
DB_TYPE: postgres
HOST: gitea-postgresql-cluster-lvmhdd-rw:5432
HOST: gitea-postgresql-cluster-rw:5432
NAME: app
USER: app
indexer:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true
webhook:
ALLOWED_HOST_LIST: woodpecker.lumpiasty.xyz
admin:
username: GiteaAdmin
email: gi@tea.com
@@ -90,11 +86,6 @@ spec:
# Requirement for sharing ip with other service
externalTrafficPolicy: Cluster
ipFamilyPolicy: RequireDualStack
http:
type: ClusterIP
# We need the service to be at port 80 specifically
# to work around a bug in the Actions Runner
port: 80
ingress:
enabled: true
@@ -102,7 +93,6 @@ spec:
annotations:
cert-manager.io/cluster-issuer: letsencrypt
acme.cert-manager.io/http01-edit-in-place: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "1g"
hosts:
- host: gitea.lumpiasty.xyz
paths:

View File

@@ -1,46 +0,0 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: gitea-valkey-primary-lvmhdd-0
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: gitea-valkey-primary-lvmhdd-0
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: gitea-valkey-primary-lvmhdd-0
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-valkey-primary-lvmhdd-0
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: hdd-lvmpv
volumeName: gitea-valkey-primary-lvmhdd-0

View File

@@ -1,46 +0,0 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: immich-library-lvmhdd
namespace: openebs
spec:
capacity: 150Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: immich-library-lvmhdd
spec:
capacity:
storage: 150Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: immich-library-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: library-lvmhdd
namespace: immich
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 150Gi
storageClassName: hdd-lvmpv
volumeName: immich-library-lvmhdd

View File

@@ -2,10 +2,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- valkey-volume.yaml
- volume.yaml
- redis.yaml
- postgres-password.yaml
- postgres-volume.yaml
- postgres-cluster.yaml
- immich-library.yaml
- release.yaml

View File

@@ -2,31 +2,21 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: immich-db-lvmhdd
name: immich-db
namespace: immich
spec:
# TODO: Configure renovate to handle imageName
imageName: ghcr.io/tensorchord/cloudnative-vectorchord:14-0.4.3
instances: 1
storage:
pvcTemplate:
storageClassName: hdd-lvmpv
resources:
requests:
storage: 10Gi
volumeName: immich-db-lvmhdd-1
# Just to avoid bootstrapping the instance again
# I migrated data manually using pv_migrate because this feature is broken
# when source and target volumes are in different storage classes
# CNPG just sets dataSource to the PVC and expects the underlying storage
# to handle the migration, but it doesn't work here
size: 10Gi
storageClass: mayastor-single-hdd
bootstrap:
recovery:
backup:
name: backup-migration
initdb:
# Defaults of immich chart
database: immich
owner: immich
# We need to create custom role because default one does not allow to set up
# vectorchord extension

View File

@@ -1,33 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: immich-db-lvmhdd-1
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: immich-db-lvmhdd-1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: immich-db-lvmhdd-1
---
# PVCs are dynamically created by the Postgres operator

View File

@@ -2,35 +2,28 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: valkey
name: bitnami
namespace: immich
spec:
interval: 24h
url: https://valkey.io/valkey-helm/
type: "oci"
url: oci://registry-1.docker.io/bitnamicharts/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: valkey
name: redis
namespace: immich
spec:
interval: 30m
chart:
spec:
chart: valkey
version: 0.9.3
chart: redis
version: 22.0.5
sourceRef:
kind: HelmRepository
name: valkey
name: bitnami
values:
dataStorage:
enabled: true
persistentVolumeClaimName: immich-valkey
auth:
enabled: true
usersExistingSecret: redis
aclUsers:
default:
passwordKey: redis-password
permissions: "~* &* +@all"
global:
defaultStorageClass: mayastor-single-hdd
architecture: standalone

View File

@@ -18,7 +18,7 @@ spec:
chart:
spec:
chart: immich
version: 1.2.2
version: 0.7.5
sourceRef:
kind: HelmRepository
name: secustor
@@ -27,14 +27,14 @@ spec:
config:
vecotrExtension: vectorchord
postgres:
host: immich-db-lvmhdd-rw
host: immich-db-rw
existingSecret:
enabled: true
secretName: immich-db-immich
usernameKey: username
passwordKey: password
redis:
host: valkey
host: redis-master
existingSecret:
enabled: true
secretName: redis
@@ -47,7 +47,7 @@ spec:
volumes:
- name: uploads
persistentVolumeClaim:
claimName: library-lvmhdd
claimName: library
machineLearning:
enabled: true

View File

@@ -1,46 +0,0 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: immich-valkey
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: immich-valkey
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: immich-valkey
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: immich-valkey
namespace: immich
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: hdd-lvmpv
volumeName: immich-valkey

13
apps/immich/volume.yaml Normal file
View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: library
namespace: immich
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 150Gi
storageClassName: mayastor-single-hdd

View File

@@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- statefulset.yaml
- service.yaml

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: ispeak3

View File

@@ -1,49 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: ispeak3-ts3-data
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: ispeak3-ts3-data
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: ispeak3-ts3-data
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: ispeak3
name: ispeak3-ts3-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ispeak3-ts3-data
namespace: ispeak3
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
volumeName: ispeak3-ts3-data

View File

@@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: teamspeak3
namespace: ispeak3
spec:
selector:
app: teamspeak3
ports:
- name: voice
protocol: UDP
port: 9987
targetPort: 9987
- name: filetransfer
protocol: TCP
port: 30033
targetPort: 30033
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilyPolicy: PreferDualStack

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: teamspeak3-server
namespace: ispeak3
spec:
serviceName: "teamspeak3"
replicas: 1
selector:
matchLabels:
app: teamspeak3
template:
metadata:
labels:
app: teamspeak3
spec:
containers:
- name: teamspeak3
image: teamspeak:3.13.7
ports:
- containerPort: 9987
name: voice
protocol: UDP
- containerPort: 10011
name: query
- containerPort: 30033
name: filetransfer
volumeMounts:
- name: ts3-data
mountPath: /var/ts3server/
volumes:
- name: ts3-data
persistentVolumeClaim:
claimName: ispeak3-ts3-data

View File

@@ -1,17 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crawl4ai
- crawl4ai-proxy
- authentik
- gitea
- registry
- renovate
- librechat
- frigate
- llama
- immich
- nas
- searxng
- ispeak3
- openwebui
- woodpecker

View File

@@ -8,113 +8,92 @@ spec:
interval: 24h
url: https://dynomite567.github.io/helm-charts/
---
# apiVersion: helm.toolkit.fluxcd.io/v2
# kind: HelmRelease
# metadata:
# name: librechat
# namespace: librechat
# spec:
# interval: 30m
# chart:
# spec:
# chart: librechat
# version: 1.9.1
# sourceRef:
# kind: HelmRepository
# name: dynomite567-charts
# values:
# global:
# librechat:
# existingSecretName: librechat
# librechat:
# configEnv:
# PLUGIN_MODELS: null
# ALLOW_REGISTRATION: "false"
# TRUST_PROXY: "1"
# DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
# SEARCH: "true"
# existingSecretName: librechat
# configYamlContent: |
# version: 1.0.3
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: librechat
namespace: librechat
spec:
interval: 30m
chart:
spec:
chart: librechat
version: 1.8.9
sourceRef:
kind: HelmRepository
name: dynomite567-charts
values:
global:
librechat:
existingSecretName: librechat
librechat:
configEnv:
PLUGIN_MODELS: null
ALLOW_REGISTRATION: "false"
TRUST_PROXY: "1"
DOMAIN_CLIENT: https://librechat.lumpiasty.xyz
SEARCH: "true"
existingSecretName: librechat
configYamlContent: |
version: 1.0.3
# endpoints:
# custom:
# - name: "Llama.cpp"
# apiKey: "llama"
# baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
# models:
# default: [
# "DeepSeek-R1-0528-Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF",
# "Qwen3-8B-GGUF-no-thinking",
# "gemma3n-e4b",
# "gemma3-12b",
# "gemma3-12b-q2",
# "gemma3-12b-novision",
# "gemma3-4b",
# "gemma3-4b-novision",
# "Qwen3-4B-Thinking-2507",
# "Qwen3-4B-Thinking-2507-long-ctx",
# "Qwen2.5-VL-7B-Instruct-GGUF",
# "Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
# "Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L",
# "Qwen3-VL-2B-Instruct-GGUF",
# "Qwen3-VL-2B-Instruct-GGUF-unslothish",
# "Qwen3-VL-2B-Thinking-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF",
# "Qwen3-VL-4B-Instruct-GGUF-unslothish",
# "Qwen3-VL-4B-Thinking-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF",
# "Qwen3-VL-8B-Instruct-GGUF-unslothish",
# "Qwen3-VL-8B-Thinking-GGUF",
# "Huihui-Qwen3-VL-8B-Instruct-abliterated-GGUF",
# "Huihui-Qwen3-VL-8B-Thinking-abliterated-GGUF"
# ]
# titleConvo: true
# titleModel: "gemma3-4b-novision"
# summarize: false
# summaryModel: "gemma3-4b-novision"
# forcePrompt: false
# modelDisplayLabel: "Llama.cpp"
endpoints:
custom:
- name: "Llama.cpp"
apiKey: "llama"
baseURL: "http://llama.llama.svc.cluster.local:11434/v1"
models:
default: [
"DeepSeek-R1-0528-Qwen3-8B-GGUF",
"Qwen3-8B-GGUF",
"Qwen3-8B-GGUF-no-thinking",
"gemma3n-e4b",
"gemma3-12b",
"gemma3-12b-q2",
"gemma3-12b-novision",
"gemma3-4b",
"gemma3-4b-novision",
"Qwen3-4B-Thinking-2507",
"Qwen3-4B-Thinking-2507-long-ctx",
"Qwen2.5-VL-7B-Instruct-GGUF",
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S",
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L"
]
titleConvo: true
titleModel: "gemma3-4b-novision"
summarize: false
summaryModel: "gemma3-4b-novision"
forcePrompt: false
modelDisplayLabel: "Llama.cpp"
imageVolume:
enabled: true
size: 10G
accessModes: ReadWriteOnce
storageClassName: mayastor-single-hdd
ingress:
enabled: true
className: nginx-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
hosts:
- host: librechat.lumpiasty.xyz
paths:
- path: /
pathType: ImplementationSpecific
tls:
- hosts:
- librechat.lumpiasty.xyz
secretName: librechat-ingress
# # ✨ IMPORTANT: let llama-swap/llama-server own all these
# dropParams:
# - "temperature"
# - "top_p"
# - "top_k"
# - "presence_penalty"
# - "frequency_penalty"
# - "stop"
# - "max_tokens"
# imageVolume:
# enabled: true
# size: 10G
# accessModes: ReadWriteOnce
# storageClassName: mayastor-single-hdd
# ingress:
# enabled: true
# className: nginx-ingress
# annotations:
# cert-manager.io/cluster-issuer: letsencrypt
# nginx.ingress.kubernetes.io/proxy-body-size: "0"
# nginx.ingress.kubernetes.io/proxy-buffering: "false"
# nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
# hosts:
# - host: librechat.lumpiasty.xyz
# paths:
# - path: /
# pathType: ImplementationSpecific
# tls:
# - hosts:
# - librechat.lumpiasty.xyz
# secretName: librechat-ingress
mongodb:
persistence:
storageClass: mayastor-single-hdd
# mongodb:
# persistence:
# storageClass: mayastor-single-hdd
# meilisearch:
# persistence:
# storageClass: mayastor-single-hdd
# auth:
# existingMasterKeySecret: librechat
meilisearch:
persistence:
storageClass: mayastor-single-hdd
auth:
existingMasterKeySecret: librechat

View File

@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: caddy
image: caddy:2.11.2-alpine
image: caddy:2.10.0-alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /etc/caddy

View File

@@ -1,285 +1,216 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/mostlygeek/llama-swap/refs/heads/main/config-schema.json
healthCheckTimeout: 600
logToStdout: "both" # proxy and upstream
macros:
base_args: "--no-warmup --port ${PORT}"
common_args: "--fit-target 1536 --no-warmup --port ${PORT}"
ctx_128k: "--ctx-size 131072"
ctx_256k: "--ctx-size 262144"
gemma_sampling: "--prio 2 --temp 1.0 --repeat-penalty 1.0 --min-p 0.00 --top-k 64 --top-p 0.95"
qwen35_sampling: "--temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 -ctk q8_0 -ctv q8_0"
qwen35_35b_args: "--temp 1.0 --min-p 0.00 --top-p 0.95 --top-k 20 -ctk q8_0 -ctv q8_0"
qwen35_35b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-35B-A3B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-35B-A3B-GGUF_mmproj-F16.gguf"
qwen35_4b_heretic_mmproj: "--mmproj-url https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-F16.gguf --mmproj /root/.cache/llama.cpp/unsloth_Qwen3.5-4B-GGUF_mmproj-F16.gguf"
glm47_flash_args: "--temp 0.7 --top-p 1.0 --min-p 0.01 --repeat-penalty 1.0"
gemma4_sampling: "--temp 1.0 --top-p 0.95 --top-k 64"
thinking_on: "--chat-template-kwargs '{\"enable_thinking\": true}'"
thinking_off: "--chat-template-kwargs '{\"enable_thinking\": false}'"
hooks:
on_startup:
preload:
- "Qwen3.5-0.8B-GGUF-nothink:Q4_K_XL"
groups:
always:
persistent: true
exclusive: false
swap: false
members:
- "Qwen3.5-0.8B-GGUF-nothink:Q4_K_XL"
models:
"DeepSeek-R1-0528-Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--no-warmup
--port ${PORT}
"Qwen3-8B-GGUF-no-thinking":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-8B-GGUF:Q4_K_M
-ngl 37 -c 16384
--jinja --chat-template-file /config/qwen_nothink_chat_template.jinja
--no-warmup
--port ${PORT}
"gemma3n-e4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3n-E4B-it-GGUF:UD-Q4_K_XL
--ctx-size 16384
--n-gpu-layers 99
--seed 3407
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
${common_args}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-12b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
--no-mmproj
${common_args}
-hf unsloth/gemma-3-12b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"gemma3-12b-q2":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-12b-it-GGUF:Q2_K_L
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
${common_args}
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-warmup
--port ${PORT}
"gemma3-4b-novision":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
${ctx_128k}
${gemma_sampling}
--no-mmproj
${common_args}
"Qwen3-Coder-Next-GGUF:Q4_K_M":
-hf unsloth/gemma-3-4b-it-GGUF:Q4_K_M
--ctx-size 16384
--n-gpu-layers 99
--prio 2
--temp 1.0
--repeat-penalty 1.0
--min-p 0.00
--top-k 64
--top-p 0.95
--no-mmproj
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3-Coder-Next-GGUF:Q4_K_M
--ctx-size 65536
--predict 8192
--temp 1.0
--min-p 0.01
--top-p 0.95
--top-k 40
--repeat-penalty 1.0
-ctk q8_0 -ctv q8_0
${common_args}
"Qwen3.5-35B-A3B-GGUF:Q4_K_M":
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Thinking-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_35b_args}
${common_args}
"Qwen3.5-35B-A3B-GGUF-nothink:Q4_K_M":
-hf unsloth/Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.6
--min-p 0.00
--top-p 0.95
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen3-4B-Instruct-2507":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-35B-A3B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_35b_args}
${common_args}
${thinking_off}
# The "heretic" version does not provide the mmproj
# so providing url to the one from the non-heretic version.
"Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M":
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen3-4B-Instruct-2507-long-ctx":
ttl: 600
cmd: |
/app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj}
${ctx_256k}
${qwen35_35b_args}
${common_args}
"Qwen3.5-35B-A3B-heretic-GGUF-nothink:Q4_K_M":
-hf unsloth/Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
-ngl 99 -c 262144 --predict 81920
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--flash-attn
--cache-type-k q8_0 --cache-type-v q8_0
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-IQ1_S":
ttl: 600
cmd: |
/app/llama-server
-hf mradermacher/Qwen3.5-35B-A3B-heretic-GGUF:Q4_K_M
${qwen35_35b_heretic_mmproj}
${ctx_256k}
${qwen35_35b_args}
${common_args}
${thinking_off}
"Qwen3.5-0.8B-GGUF:Q4_K_XL":
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:IQ1_S
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-32B-Instruct-GGUF-Q2_K_L":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
${ctx_256k}
${qwen35_sampling}
${base_args}
${thinking_on}
"Qwen3.5-0.8B-GGUF-nothink:Q4_K_XL":
-hf unsloth/Qwen2.5-VL-32B-Instruct-GGUF:Q2_K_L
-ngl 99 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}
"Qwen2.5-VL-7B-Instruct-GGUF":
ttl: 600
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-0.8B-GGUF:Q4_K_XL
--ctx-size 4096
${qwen35_sampling}
${base_args}
${thinking_off}
"Qwen3.5-2B-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
"Qwen3.5-2B-GGUF-nothink:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-2B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
"Qwen3.5-4B-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_on}
"Qwen3.5-4B-GGUF-nothink:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-4B-GGUF:Q4_K_M
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_off}
"Qwen3.5-4B-heretic-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj}
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_on}
"Qwen3.5-4B-heretic-GGUF-nothink:Q4_K_M":
cmd: |
/app/llama-server
-hf mradermacher/Qwen3.5-4B-heretic-GGUF:Q4_K_M
${qwen35_4b_heretic_mmproj}
${ctx_128k}
${qwen35_sampling}
${common_args}
${thinking_off}
"Qwen3.5-9B-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
"Qwen3.5-9B-GGUF-nothink:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q4_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
"Qwen3.5-9B-GGUF:Q3_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
"Qwen3.5-9B-GGUF-nothink:Q3_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-9B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
"Qwen3.5-27B-GGUF:Q3_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_on}
"Qwen3.5-27B-GGUF-nothink:Q3_K_M":
cmd: |
/app/llama-server
-hf unsloth/Qwen3.5-27B-GGUF:Q3_K_M
${ctx_256k}
${qwen35_sampling}
${common_args}
${thinking_off}
"GLM-4.7-Flash-GGUF:Q4_K_M":
cmd: |
/app/llama-server
-hf unsloth/GLM-4.7-Flash-GGUF:Q4_K_M
${glm47_flash_args}
${common_args}
"gemma-4-26B-A4B-it:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q4_K_XL \
${ctx_256k}
${gemma4_sampling}
${common_args}
"gemma-4-26B-A4B-it:UD-Q2_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-26B-A4B-it-GGUF:UD-Q2_K_XL \
${ctx_256k}
${gemma4_sampling}
${common_args}
"unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-E4B-it-GGUF:UD-Q4_K_XL \
${ctx_128k}
${gemma4_sampling}
${common_args}
"unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL":
cmd: |
/app/llama-server
-hf unsloth/gemma-4-E2B-it-GGUF:UD-Q4_K_XL \
${ctx_128k}
${gemma4_sampling}
${common_args}
-hf unsloth/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M
-ngl 37 -c 16384 --predict 8192
--temp 0.7
--min-p 0.00
--top-p 0.8
--top-k 20
--repeat-penalty 1.0
--no-warmup
--port ${PORT}

View File

@@ -6,8 +6,6 @@ metadata:
namespace: llama
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: llama-swap
@@ -18,7 +16,7 @@ spec:
spec:
containers:
- name: llama-swap
image: ghcr.io/mostlygeek/llama-swap:v199-vulkan-b8637
image: ghcr.io/mostlygeek/llama-swap:v147-vulkan-b6075
imagePullPolicy: IfNotPresent
command:
- /app/llama-swap
@@ -43,7 +41,7 @@ spec:
volumes:
- name: models
persistentVolumeClaim:
claimName: llama-models-lvmssd
claimName: llama-models
- name: kfd
hostPath:
path: /dev/kfd

View File

@@ -5,7 +5,7 @@ resources:
- secret.yaml
- auth-proxy.yaml
- ingress.yaml
- pvc-ssd.yaml
- pvc.yaml
- deployment.yaml
configMapGenerator:
- name: llama-swap

View File

@@ -1,46 +0,0 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: llama-models-lvmssd
namespace: openebs
spec:
capacity: "322122547200"
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-ssd$
volGroup: openebs-ssd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: llama-models-lvmssd
spec:
capacity:
storage: 300Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: ssd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: llama-models-lvmssd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: llama-models-lvmssd
namespace: llama
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 300Gi
storageClassName: ssd-lvmpv
volumeName: llama-models-lvmssd

13
apps/llama/pvc.yaml Normal file
View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: llama
name: llama-models
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
storageClassName: mayastor-single-ssd

View File

@@ -1,28 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nas-sftp-config
namespace: nas
data:
sftp.json: |
{
"Global": {
"Chroot": {
"Directory": "%h",
"StartPath": "data"
},
"Directories": [
"data"
]
},
"Users": [
{
"Username": "nas",
"UID": 1000,
"GID": 1000,
"PublicKeys": [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCresbDFZijI+rZMgd3LdciPjpb4x4S5B7y0U+EoYPaz6hILT72fyz3QdcgKJJv8JUJI6g0811/yFRuOzCXgWaA922c/S/t6HMUrorh7mPVQMTN2dc/SVBvMa7S2M9NYBj6z1X2LRHs+g1JTMCtL202PIjes/E9qu0as0Vx6n/6HHNmtmA9LrpiAmurbeKXDmrYe2yWg/FA6cX5d86SJb21Dj8WqdCd3Hz0Pi6FzMKXhpWvs5Hfei1htsjsRzCxkpSTjlgFEFVfmHIXPfB06Sa6aCnkxAFnE7N+xNa9RIWeZmOXdA74LsfSKQ9eAXSrsC/IRxo2ce8cBzXJy+Itxw24fUqGYXBiCgx8i3ZA9IdwI1u71xYo9lyNjav5VykzKnAHRAYnDm9UsCf8k04reBevcLdtxL11vPCtind3xn76Nhy2b45dcp/MdYFANGsCcXJOMb6Aisb03HPGhs/aU3tCAQbTVe195mL9FWhGqIK2wBmF1SKW+4ssX2bIU6YaCYc= cardno:23_671_999"
]
}
]
}

View File

@@ -1,68 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nas-sftp
namespace: nas
spec:
replicas: 1
selector:
matchLabels:
app: nas-sftp
template:
metadata:
labels:
app: nas-sftp
spec:
initContainers:
- name: prepare-home
image: alpine:3.23.3
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
set -euo pipefail
mkdir -p /volume/sftp-root
chown root:root /volume/sftp-root
chmod 755 /volume/sftp-root
mkdir -p /volume/sftp-root/data
chown 1000:1000 /volume/sftp-root/data
chmod 750 /volume/sftp-root/data
mkdir -p /volume/host-keys
chown root:root /volume/host-keys
chmod 700 /volume/host-keys
volumeMounts:
- name: home
mountPath: /volume
containers:
- name: sftp
image: docker.io/emberstack/sftp:build-5.1.72
imagePullPolicy: IfNotPresent
ports:
- containerPort: 22
name: sftp
protocol: TCP
volumeMounts:
- name: config
mountPath: /app/config/sftp.json
subPath: sftp.json
readOnly: true
- name: home
mountPath: /home/nas
subPath: sftp-root
- name: home
mountPath: /etc/ssh/keys
subPath: host-keys
resources:
requests:
cpu: 50m
memory: 128Mi
limits:
memory: 512Mi
volumes:
- name: home
persistentVolumeClaim:
claimName: nas-data-lvm-hdd
- name: config
configMap:
name: nas-sftp-config

View File

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- configmap.yaml
- pvc.yaml
- deployment.yaml
- service.yaml

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: nas

View File

@@ -1,49 +0,0 @@
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: nas-data-lvm-hdd
namespace: openebs
spec:
capacity: 4Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
spec:
capacity:
storage: 4Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: openebs-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
volumeHandle: nas-data-lvm-hdd
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
namespace: nas
name: nas-data-lvm-hdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nas-data-lvm-hdd
namespace: nas
spec:
storageClassName: openebs-lvmpv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
volumeName: nas-data-lvm-hdd

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nas-sftp
namespace: nas
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster
ports:
- name: sftp
port: 22
targetPort: 22
protocol: TCP
selector:
app: nas-sftp

View File

@@ -1,44 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
namespace: openwebui
name: openwebui-web
spec:
type: ClusterIP
selector:
app.kubernetes.io/component: open-webui
app.kubernetes.io/instance: openwebui
ports:
- name: http
port: 80
targetPort: 8080
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: openwebui
name: openwebui
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-buffering: "false"
nginx.ingress.kubernetes.io/proxy-read-timeout: 30m
spec:
ingressClassName: nginx-ingress
rules:
- host: openwebui.lumpiasty.xyz
http:
paths:
- backend:
service:
name: openwebui-web
port:
number: 80
path: /
pathType: Prefix
tls:
- hosts:
- openwebui.lumpiasty.xyz
secretName: openwebui-ingress

View File

@@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- pvc-pipelines.yaml
- secret.yaml
- release.yaml
- ingress.yaml

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: openwebui

View File

@@ -1,46 +0,0 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: openwebui-pipelines-lvmhdd
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: openwebui-pipelines-lvmhdd
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: openwebui-pipelines-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: openwebui-pipelines-lvmhdd
namespace: openwebui
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: hdd-lvmpv
volumeName: openwebui-pipelines-lvmhdd

View File

@@ -1,46 +0,0 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: openwebui-lvmhdd
namespace: openebs
spec:
capacity: 10Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: openwebui-lvmhdd
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: openwebui-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: openwebui-lvmhdd
namespace: openwebui
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: hdd-lvmpv
volumeName: openwebui-lvmhdd

View File

@@ -1,73 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: open-webui
namespace: openwebui
spec:
interval: 24h
url: https://open-webui.github.io/helm-charts
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: openwebui
namespace: openwebui
spec:
interval: 30m
chart:
spec:
chart: open-webui
version: 12.13.0
sourceRef:
kind: HelmRepository
name: open-webui
values:
# Disable built in ingress, service is broken in chart
# They have hard coded wrong target port
# Reimplementing that in ingress.yaml
ingress:
enabled: false
persistence:
enabled: true
existingClaim: openwebui-lvmhdd
enableOpenaiApi: true
openaiBaseApiUrl: "http://llama.llama.svc.cluster.local:11434/v1"
ollama:
enabled: false
pipelines:
enabled: true
persistence:
enabled: true
existingClaim: openwebui-pipelines-lvmhdd
# SSO with Authentik
extraEnvVars:
- name: WEBUI_URL
value: "https://openwebui.lumpiasty.xyz"
- name: OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
name: openwebui-authentik
key: client_id
- name: OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: openwebui-authentik
key: client_secret
- name: OAUTH_PROVIDER_NAME
value: "authentik"
- name: OPENID_PROVIDER_URL
value: "https://authentik.lumpiasty.xyz/application/o/open-web-ui/.well-known/openid-configuration"
- name: OPENID_REDIRECT_URI
value: "https://openwebui.lumpiasty.xyz/oauth/oidc/callback"
- name: ENABLE_OAUTH_SIGNUP
value: "true"
- name: ENABLE_LOGIN_FORM
value: "false"
- name: OAUTH_MERGE_ACCOUNTS_BY_EMAIL
value: "true"

View File

@@ -1,43 +0,0 @@
---
# ServiceAccount the Vault Secrets Operator authenticates as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: openwebui-secret
  namespace: openwebui
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
  name: openwebui
  namespace: openwebui
spec:
  method: kubernetes
  mount: kubernetes
  kubernetes:
    role: openwebui
    serviceAccount: openwebui-secret
---
# Syncs the Authentik OAuth client credentials from Vault into a
# Kubernetes Secret consumed by the HelmRelease extraEnvVars.
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
  name: openwebui-authentik
  namespace: openwebui
spec:
  type: kv-v2
  mount: secret
  path: authentik/openwebui
  destination:
    create: true
    name: openwebui-authentik
    type: Opaque
    transformation:
      excludeRaw: true
      templates:
        client_id:
          text: '{{ get .Secrets "client_id" }}'
        client_secret:
          text: '{{ get .Secrets "client_secret" }}'
  vaultAuthRef: openwebui

View File

@@ -0,0 +1,40 @@
# Single-replica container registry backed by the registry-data PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
        - name: registry
          image: registry:3.0.0
          ports:
            - containerPort: 5000
          volumeMounts:
            # Default storage path of the distribution registry image.
            - name: data
              mountPath: /var/lib/registry
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: registry-data
---
# Exposes the registry inside the cluster on port 80 -> container 5000.
apiVersion: v1
kind: Service
metadata:
  name: registry-service
  namespace: registry
spec:
  selector:
    app: registry
  ports:
    - protocol: TCP
      port: 80
      targetPort: 5000

View File

@@ -0,0 +1,26 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  namespace: registry
  name: registry
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    # Image layer uploads can be huge; disable nginx's request body limit.
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
  ingressClassName: nginx-ingress
  rules:
    - host: registry.lumpiasty.xyz
      http:
        paths:
          - backend:
              service:
                name: registry-service
                port:
                  number: 80
            path: /
            pathType: Prefix
  tls:
    - hosts:
        - registry.lumpiasty.xyz
      # NOTE(review): "researcher-ingress" looks copy-pasted from another
      # app; consider renaming to registry-ingress (cert-manager creates the
      # secret, so renaming is safe) — confirm before changing.
      secretName: researcher-ingress

View File

@@ -1,7 +1,8 @@
---
# Kustomize entry point for the registry app.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - secret.yaml
  - volume.yaml
  - deployment.yaml
  - service.yaml
  - ingress.yaml

View File

@@ -2,4 +2,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: woodpecker
name: registry

13
apps/registry/volume.yaml Normal file
View File

@@ -0,0 +1,13 @@
---
# Dynamically provisioned storage for registry image data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-data
  namespace: registry
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: mayastor-single-hdd

View File

@@ -15,7 +15,7 @@ spec:
- name: renovate
# Update this to the latest available and then enable Renovate on
# the manifest
image: renovate/renovate:43.104.3-full
image: renovate/renovate:41.82.10-full
envFrom:
- secretRef:
name: renovate-gitea-token

View File

@@ -39,4 +39,4 @@ spec:
name: searxng-config
- name: searxng-persistent-data
persistentVolumeClaim:
claimName: searxng-persistent-data-lvmhdd
claimName: searxng-persistent-data

View File

@@ -1,46 +1,13 @@
---
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
labels:
kubernetes.io/nodename: anapistula-delrosalae
name: searxng-persistent-data-lvmhdd
namespace: openebs
spec:
capacity: 1Gi
ownerNodeID: anapistula-delrosalae
shared: "yes"
thinProvision: "no"
vgPattern: ^openebs-hdd$
volGroup: openebs-hdd
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: searxng-persistent-data-lvmhdd
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: hdd-lvmpv
volumeMode: Filesystem
csi:
driver: local.csi.openebs.io
fsType: btrfs
volumeHandle: searxng-persistent-data-lvmhdd
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-persistent-data-lvmhdd
namespace: searxng
name: searxng-persistent-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: hdd-lvmpv
volumeName: searxng-persistent-data-lvmhdd
storageClassName: mayastor-single-ssd

View File

@@ -1,8 +0,0 @@
# Kustomize entry point for the woodpecker app.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - postgres-volume.yaml
  - postgres-cluster.yaml
  - release.yaml
  - secret.yaml

View File

@@ -1,23 +0,0 @@
---
# CloudNativePG single-instance PostgreSQL cluster for Woodpecker.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: woodpecker-postgresql-cluster
  namespace: woodpecker
spec:
  instances: 1
  imageName: ghcr.io/cloudnative-pg/postgresql:17.4
  bootstrap:
    initdb:
      database: woodpecker
      owner: woodpecker
  storage:
    pvcTemplate:
      storageClassName: ssd-lvmpv
      resources:
        requests:
          storage: 10Gi
      # Bind to the statically provisioned PV from postgres-volume.yaml.
      volumeName: woodpecker-postgresql-cluster-lvmssd

View File

@@ -1,33 +0,0 @@
# OpenEBS LVM volume carved from the openebs-ssd volume group on the
# anapistula-delrosalae node.
apiVersion: local.openebs.io/v1alpha1
kind: LVMVolume
metadata:
  labels:
    kubernetes.io/nodename: anapistula-delrosalae
  name: woodpecker-postgresql-cluster-lvmssd
  namespace: openebs
spec:
  capacity: 10Gi
  ownerNodeID: anapistula-delrosalae
  shared: "yes"
  thinProvision: "no"
  vgPattern: ^openebs-ssd$
  volGroup: openebs-ssd
---
# Static PV exposing the LVM volume above via the OpenEBS local CSI driver.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: woodpecker-postgresql-cluster-lvmssd
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ssd-lvmpv
  volumeMode: Filesystem
  csi:
    driver: local.csi.openebs.io
    fsType: btrfs
    volumeHandle: woodpecker-postgresql-cluster-lvmssd
---
# PVC is dynamically created by the Postgres operator

View File

@@ -1,115 +0,0 @@
---
# Flux source for the upstream Woodpecker CI Helm chart.
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: woodpecker
  namespace: woodpecker
spec:
  interval: 24h
  url: https://woodpecker-ci.org/
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: woodpecker
  namespace: woodpecker
spec:
  interval: 30m
  chart:
    spec:
      chart: woodpecker
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: woodpecker
        namespace: woodpecker
      interval: 12h
  values:
    server:
      enabled: true
      statefulSet:
        replicaCount: 1
      persistentVolume:
        enabled: false # Using Postgresql database
      env:
        WOODPECKER_HOST: "https://woodpecker.lumpiasty.xyz"
        # Gitea integration
        WOODPECKER_GITEA: "true"
        WOODPECKER_GITEA_URL: "https://gitea.lumpiasty.xyz"
        # PostgreSQL database configuration
        WOODPECKER_DATABASE_DRIVER: postgres
        # Password is loaded from woodpecker-postgresql-cluster-app secret (created by CNPG)
        WOODPECKER_DATABASE_DATASOURCE:
          valueFrom:
            secretKeyRef:
              name: woodpecker-postgresql-cluster-app
              key: fqdn-uri
        # Allow logging in from all accounts on Gitea
        WOODPECKER_OPEN: "true"
        # Make lumpiasty admin
        WOODPECKER_ADMIN: GiteaAdmin
      createAgentSecret: true
      extraSecretNamesForEnvFrom:
        - woodpecker-secrets
      ingress:
        enabled: true
        ingressClassName: nginx-ingress
        annotations:
          cert-manager.io/cluster-issuer: letsencrypt
          acme.cert-manager.io/http01-edit-in-place: "true"
        hosts:
          - host: woodpecker.lumpiasty.xyz
            paths:
              - path: /
                backend:
                  serviceName: woodpecker-server
                  servicePort: 80
        tls:
          - hosts:
              - woodpecker.lumpiasty.xyz
            secretName: woodpecker-ingress
      resources:
        requests:
          cpu: 100m
          memory: 256Mi
      service:
        type: ClusterIP
        port: 80
    agent:
      enabled: true
      replicaCount: 2
      env:
        WOODPECKER_SERVER: "woodpecker-server:9000"
        # Run pipeline steps as Kubernetes jobs in this namespace.
        WOODPECKER_BACKEND: kubernetes
        WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
        WOODPECKER_BACKEND_K8S_STORAGE_CLASS: ssd-lvmpv
        WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
        WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
        WOODPECKER_CONNECT_RETRY_COUNT: "5"
      mapAgentSecret: true
      extraSecretNamesForEnvFrom:
        - woodpecker-secrets
      persistence:
        enabled: false
      serviceAccount:
        create: true
      rbac:
        create: true
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
View File

@@ -1,62 +0,0 @@
---
# ServiceAccount the Vault Secrets Operator authenticates as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: woodpecker-secret
  namespace: woodpecker
---
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
  name: woodpecker
  namespace: woodpecker
spec:
  method: kubernetes
  mount: kubernetes
  kubernetes:
    role: woodpecker
    serviceAccount: woodpecker-secret
---
# Main woodpecker secrets from Vault
# Requires vault kv put secret/woodpecker \
#   WOODPECKER_AGENT_SECRET="$(openssl rand -hex 32)" \
#   WOODPECKER_GITEA_CLIENT="<gitea-oauth-client>" \
#   WOODPECKER_GITEA_SECRET="<gitea-oauth-secret>"
# Note: Database password comes from CNPG secret (woodpecker-postgresql-cluster-app)
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
  name: woodpecker-secrets
  namespace: woodpecker
spec:
  type: kv-v2
  mount: secret
  path: woodpecker
  destination:
    create: true
    name: woodpecker-secrets
    type: Opaque
    transformation:
      excludeRaw: true
  vaultAuthRef: woodpecker
---
# Container registry credentials for Kaniko
# Requires vault kv put secret/container-registry \
#   REGISTRY_USERNAME="<username>" \
#   REGISTRY_PASSWORD="<token>"
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
  name: container-registry
  namespace: woodpecker
spec:
  type: kv-v2
  mount: secret
  path: container-registry
  destination:
    create: true
    name: container-registry
    type: Opaque
    transformation:
      excludeRaw: true
  vaultAuthRef: woodpecker

View File

@@ -1,79 +0,0 @@
{ pkgs, lib, config, inputs, ... }:
let
  # Python with hvac package
  python = pkgs.python313.withPackages (python-pkgs: with python-pkgs; [
    hvac
    librouteros
  ]);
in
{
  # Overlays - apply krew2nix to get kubectl with krew support
  overlays = [
    inputs.krew2nix.overlay
  ];
  # Environment variables
  env = {
    GREET = "devenv";
    TALOSCONFIG = "${config.devenv.root}/talos/generated/talosconfig";
    EDITOR = "vim";
    RESTIC_REPOSITORY = "s3:https://s3.eu-central-003.backblazeb2.com/lumpiasty-backups";
    VAULT_ADDR = "https://openbao.lumpiasty.xyz:8200";
    PATH = "${config.devenv.root}/utils:${pkgs.coreutils}/bin";
    PYTHON_BIN = "${python}/bin/python";
    KUBECONFIG = "${config.devenv.root}/talos/generated/kubeconfig";
  };
  # Packages
  packages = with pkgs; [
    python
    vim gnumake
    talosctl cilium-cli
    kubectx k9s kubernetes-helm
    (kubectl.withKrewPlugins (plugins: with plugins; [
      mayastor
      openebs
      browse-pvc
    ]))
    fluxcd
    restic
    openbao
    pv-migrate
    mermaid-cli
    (
      # Wrapping opencode to set the OPENCODE_ENABLE_EXA environment variable
      runCommand "opencode" {
        buildInputs = [ makeWrapper ];
      } ''
        mkdir -p $out/bin
        makeWrapper ${pkgs.opencode}/bin/opencode $out/bin/opencode \
          --set OPENCODE_ENABLE_EXA "1"
      ''
    )
    tea
    woodpecker-cli
  ];
  # Scripts
  scripts.hello.exec = ''
    echo hello from $GREET
  '';
  # Shell hooks
  enterShell = ''
    source ${pkgs.bash-completion}/share/bash-completion/bash_completion
    echo "Environment ready!"
  '';
  # Tests
  enterTest = ''
    echo "Running tests"
    git --version | grep --color=auto "${pkgs.git.version}"
  '';
  languages.ansible.enable = true;
  # TODO: automatically manage collections from ansible/requirements.yml
  # For now, we need to manually install them with `ansible-galaxy collection install -r ansible/requirements.yml`
  # This is not implemented in devenv
}

View File

@@ -1,20 +0,0 @@
# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json
inputs:
  nixpkgs:
    url: github:NixOS/nixpkgs/nixos-unstable
  krew2nix:
    url: github:a1994sc/krew2nix
    inputs:
      nixpkgs:
        follows: nixpkgs
# If you're using non-OSS software, you can set allowUnfree to true.
# allowUnfree: true
# If you're willing to use a package that's vulnerable
# permittedInsecurePackages:
#   - "openssl-1.1.1w"
# If you have more than one devenv you can merge them
#imports:
#  - ./backend
Some files were not shown because too many files have changed in this diff Show More