Custom CoreDNS configuration that denies IPv6 (AAAA) lookups for huggingface.co to force IPv4

This commit is contained in:
2026-02-26 02:32:26 +01:00
parent 2c83eb26b3
commit a0a7b85cc2
3 changed files with 209 additions and 0 deletions

View File

@@ -0,0 +1,200 @@
---
# ServiceAccount bound to system:coredns below and used by the CoreDNS Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
---
# Read-only access CoreDNS needs to serve cluster DNS records:
# core objects for the `kubernetes` plugin, plus EndpointSlices.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:coredns
  labels:
    k8s-app: kube-dns
rules:
  - apiGroups: [""]
    resources:
      - endpoints
      - services
      - pods
      - namespaces
    verbs: ["list", "watch"]
  - apiGroups: ["discovery.k8s.io"]
    resources: ["endpointslices"]
    verbs: ["list", "watch"]
---
# Grants the system:coredns ClusterRole to the coredns ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:coredns
  labels:
    k8s-app: kube-dns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
  - kind: ServiceAccount
    name: coredns
    namespace: kube-system
---
# Corefile mounted into the CoreDNS container (see Deployment volumeMounts).
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |-
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        log . {
            class error
        }
        prometheus :9153
        # Return NODATA for AAAA on selected domains to force IPv4.
        # [.] is a regex character class matching a literal dot; it avoids
        # backslash escapes entirely, so the pattern survives Corefile
        # quoted-token parsing unambiguously (unlike "\\." / "\.").
        template IN AAAA {
            match "(^|[.])huggingface[.]co[.]$"
            rcode NOERROR
            fallthrough
        }
        kubernetes homelab.lumpiasty.xyz cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30 {
            disable success cluster.local
            disable denial cluster.local
        }
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: CoreDNS
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        # Allow scheduling on control-plane nodes.
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: coredns
          # NOTE(review): confirm this tag exists in registry.k8s.io before rollout.
          image: registry.k8s.io/coredns/coredns:v1.14.1
          imagePullPolicy: IfNotPresent
          args: ["-conf", "/etc/coredns/Corefile"]
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
          # Ports 8080/8181 are served by the `health` / `ready` plugins in the Corefile.
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            timeoutSeconds: 1
            successThreshold: 1
            failureThreshold: 3
            periodSeconds: 10
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 0
              memory: 70Mi
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - ALL
            readOnlyRootFilesystem: true
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
              readOnly: true
      # Use the node's resolver config, which `forward . /etc/resolv.conf` depends on.
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
              - key: Corefile
                path: Corefile
---
# Dual-stack cluster DNS Service; clusterIP must equal clusterIPs[0]
# and the address families must match the order of ipFamilies.
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: CoreDNS
spec:
  type: ClusterIP
  clusterIP: 10.43.0.10
  clusterIPs:
    - 10.43.0.10
    - "2001:470:61a3:300::a"
  ipFamilyPolicy: RequireDualStack
  ipFamilies:
    - IPv4
    - IPv6
  selector:
    k8s-app: kube-dns
  ports:
    - name: dns
      port: 53
      protocol: UDP
      targetPort: 53
    - name: dns-tcp
      port: 53
      protocol: TCP
      targetPort: 53

View File

@@ -4,6 +4,7 @@ resources:
- controllers/k8up-crd-4.8.3.yaml
- controllers/cilium.yaml
- controllers/nginx-ingress.yaml
- controllers/coredns.yaml
- controllers/dns-public.yaml
- controllers/cert-manager.yaml
- controllers/cert-manager-webhook-ovh.yaml

View File

@@ -14,7 +14,15 @@ machine:
hostDNS:
forwardKubeDNSToHost: false
kubelet:
clusterDNS:
- 10.43.0.10
- 2001:470:61a3:300::a
cluster:
# We're configuring CoreDNS ourselves, so disable the default one
coreDNS:
disabled: true
network:
# Likely redundant, we use Cilium as IPAM with their CRDs
podSubnets: