# Pick the Talos release to install (e.g. v1.9.0).
export TALOS_VERSION= # <- EDIT THIS

# Download the Talos installer ISOs for both architectures.
wget -O "metal-arm64-${TALOS_VERSION//./-}.iso" \
  "https://github.com/siderolabs/talos/releases/download/$TALOS_VERSION/metal-arm64.iso"

wget -O "metal-amd64-${TALOS_VERSION//./-}.iso" \
  "https://github.com/siderolabs/talos/releases/download/$TALOS_VERSION/metal-amd64.iso"

# Keep editor/OS clutter and the generated cluster config out of git:
#   **/.DS_Store
#   **/.vscode
#   clusterconfig/
#
# Tip: `just dump` — NOTE(review): purpose not shown here; confirm against the
# repo's justfile.
cat >> .gitignore <<EOF
**/.DS_Store
**/.vscode
clusterconfig/
EOF
# Important: if you don't have an age key yet, generate one:
mkdir -p $HOME/.config/sops/age
age-keygen -o $HOME/.config/sops/age/keys.txt

# Derive the age public key and register it as a sops creation rule.
export TALSECRETS_KEY=$(age-keygen -y ~/.config/sops/age/keys.txt)

cat >> .sops.yaml <<EOF
creation_rules:
  - age:
      - $TALSECRETS_KEY
EOF

# Generate the Talos secrets bundle (encrypted in the next step).
talhelper gensecret > talsecret.sops.yaml
# Encrypt the secrets bundle in place with sops/age.
sops -e -i talsecret.sops.yaml

# --- talconfig.yaml ---
clusterName: {cluster-name} # <- EDIT THIS
talosVersion: {version} # <- EDIT THIS
kubernetesVersion: {version} # <- EDIT THIS
endpoint: https://{endpoint-address}:6443 # <- EDIT THIS
domain: cluster.local
allowSchedulingOnControlPlanes: false
clusterPodNets:
  - 10.244.0.0/16
clusterSvcNets:
  - 10.96.0.0/12
# CNI is installed separately (Cilium); kube-proxy is disabled in favor of
# Cilium's kube-proxy replacement.
cniConfig:
  name: none
patches:
  - |-
    cluster:
      proxy:
        disabled: true
  - |-
    machine:
      features:
        hostDNS:
          enabled: true
          forwardKubeDNSToHost: false
# Shared node settings, merged into controlPlane/worker via YAML anchor.
commonConfig: &common
  volumes:
    - name: EPHEMERAL
      encryption:
        provider: luks2
        keys:
          - slot: 0
            nodeID: {}
    - name: STATE
      encryption:
        provider: luks2
        keys:
          - slot: 0
            nodeID: {}
  machineSpec:
    mode: metal
    secureboot: false
  schematic:
    customization:
      systemExtensions:
        officialExtensions:
          - siderolabs/iscsi-tools
          - siderolabs/util-linux-tools
          - siderolabs/qemu-guest-agent
  patches:
    - |-
      machine:
        sysctls:
          vm.nr_hugepages: "1024"
        kernel:
          modules:
            - name: nvme_tcp
            - name: vfio_pci
        kubelet:
          extraMounts:
            - destination: /var/lib/longhorn
              type: bind
              source: /var/lib/longhorn
              options:
                - bind
                - rshared
                - rw
          extraArgs:
            rotate-server-certificates: true
controlPlane:
  <<: *common
worker:
  <<: *common
nodes:
  - hostname: node-0
    controlPlane: false
    nodeAnnotations:
      machine: netcup-v22.....
    patches:
      - |-
        machine:
          kubelet:
            extraConfig:
              registerWithTaints:
                - key: node.kubernetes.io/edge
                  value: "true"
                  effect: NoSchedule
    ipAddress: 192.168.0.10
    installDisk: /dev/vda
    networkInterfaces:
      - interface: ens3
        addresses:
          - 192.168.0.10/22
        routes:
          - network: 0.0.0.0/0
            gateway: 192.168.0.1
        dhcp: false
# --- end talconfig.yaml ---

# Render configs, apply them, then bootstrap the cluster.
talhelper genconfig
talhelper gencommand apply --extra-flags --insecure
talosctl bootstrap --talosconfig=clusterconfig/talosconfig --nodes {endpoint-address} # <- EDIT THIS
talosctl kubeconfig --talosconfig=clusterconfig/talosconfig --nodes {endpoint-address} # <- EDIT THIS
watch kubectl get nodes

# Label nodes by role.
kubectl label node <node-name> node-role.kubernetes.io/worker=""
kubectl label node <node-name> node-role.kubernetes.io/edge=""
kubectl label node <node-name> node-role.kubernetes.io/control-plane=""

wget -O gateway-api-crds-standard.yaml https://github.com/kubernetes-sigs/gateway-api/releases/latest/download/standard-install.yaml
# Fetch the remaining base manifests.
wget -O gateway-api-crds-experimental.yaml https://github.com/kubernetes-sigs/gateway-api/releases/latest/download/experimental-install.yaml
wget -O cert-approver.yaml https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
wget -O metrics-server.yaml https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

# Apply them (CRDs server-side to avoid the annotation size limit).
kubectl apply --server-side -f gateway-api-crds-standard.yaml
kubectl apply --server-side -f gateway-api-crds-experimental.yaml
kubectl apply -f cert-approver.yaml
kubectl apply -f metrics-server.yaml

helm repo add cilium https://helm.cilium.io/
helm repo update
helm search repo cilium/cilium --versions | head
helm show values cilium/cilium --version {version} > cilium-values.yaml # <- EDIT THIS

# --- cilium-values.yaml ---
ipam:
  mode: kubernetes
kubeProxyReplacement: true
securityContext:
  capabilities:
    ciliumAgent:
      - CHOWN
      - KILL
      - NET_ADMIN
      - NET_RAW
      - IPC_LOCK
      - SYS_ADMIN
      - SYS_RESOURCE
      - DAC_OVERRIDE
      - FOWNER
      - SETGID
      - SETUID
    cleanCiliumState:
      - NET_ADMIN
      - SYS_ADMIN
      - SYS_RESOURCE
cgroup:
  autoMount:
    enabled: false
  hostRoot: /sys/fs/cgroup
k8sServiceHost: {endpoint-ip} # <- EDIT THIS
k8sServicePort: 6443
gatewayAPI:
  enabled: false
hubble:
  relay:
    enabled: true
  ui:
    enabled: true
hostFirewall:
  enabled: true
# --- end cilium-values.yaml ---

# EDIT the {version} placeholders below. Do not put a comment after a trailing
# backslash — it silently breaks the line continuation.
helm template \
  cilium cilium/cilium \
  --kube-version {version} \
  --version {version} \
  --namespace cilium-system \
  --values cilium-values.yaml \
  > cilium.yaml

kubectl create namespace cilium-system
kubectl label namespace cilium-system \
  pod-security.kubernetes.io/enforce=privileged \
  pod-security.kubernetes.io/warn=privileged \
  pod-security.kubernetes.io/audit=privileged --overwrite
kubectl apply -f cilium.yaml
watch kubectl get pods -n cilium-system

helm repo add longhorn https://charts.longhorn.io
helm repo update
helm search repo longhorn/longhorn --versions | head
helm show values longhorn/longhorn --version {version} > longhorn-values.yaml # <- EDIT THIS

# --- longhorn-values.yaml ---
persistence:
  defaultClass: false
global:
  tolerations:
    - key: node.kubernetes.io/edge
      operator: "Exists"
      # NOTE(review): the effect value was lost in the original; an empty
      # effect tolerates all effects — "NoSchedule" was likely intended. Verify.
      effect:
# --- end longhorn-values.yaml ---

# EDIT the {version} placeholders below (no comments after trailing backslashes).
helm template \
  longhorn longhorn/longhorn \
  --kube-version {version} \
  --version {version} \
  --namespace longhorn-system \
  --values longhorn-values.yaml \
  > longhorn.yaml

kubectl create namespace longhorn-system
kubectl label namespace longhorn-system \
  pod-security.kubernetes.io/enforce=privileged \
  pod-security.kubernetes.io/warn=privileged \
  pod-security.kubernetes.io/audit=privileged --overwrite
kubectl apply -f longhorn.yaml
watch kubectl get pods -n longhorn-system
kubectl -n longhorn-system port-forward svc/longhorn-frontend 8080:80

helm repo add cnpg https://cloudnative-pg.github.io/charts
helm repo update
helm search repo cnpg/cloudnative-pg --versions | head
helm show values cnpg/cloudnative-pg --version {version} > cnpg-values.yaml # <- EDIT THIS

# --- cnpg-values.yaml ---
replicaCount: 3
# --- end cnpg-values.yaml ---

# EDIT the {version} placeholders (no comments after trailing backslashes).
helm template \
  cnpg cnpg/cloudnative-pg \
  --kube-version {version} \
  --version {version} \
  --namespace cnpg-system \
  --values cnpg-values.yaml \
  > cnpg.yaml

kubectl create namespace cnpg-system
kubectl apply --server-side -f cnpg.yaml
watch kubectl get pods -n cnpg-system

# List available cert-manager chart versions from the OCI registry.
crane ls quay.io/jetstack/charts/cert-manager | tail
crane ls quay.io/jetstack/charts/cert-manager-approver-policy | tail
wget -O cert-manager-crds.yaml https://github.com/cert-manager/cert-manager/releases/download/{version}/cert-manager.crds.yaml # <- EDIT THIS
helm show values oci://quay.io/jetstack/charts/cert-manager --version {version} > cert-manager-values.yaml # <- EDIT THIS

# --- cert-manager-values.yaml ---
installCRDs: false
replicaCount: 3
disableAutoApproval: true
# --- end cert-manager-values.yaml ---

# EDIT the {version} placeholders in the command below.
helm template \
cert-manager oci://quay.io/jetstack/charts/cert-manager \
--kube-version {version} \ # <- EDIT THIS
--version {version} \ # <- EDIT THIS
--namespace cert-manager-system \
--values cert-manager-values.yaml \
> cert-manager.yaml
helm template \
cert-manager-approver-policy oci://quay.io/jetstack/charts/cert-manager-approver-policy \
--kube-version {version} \ # <- EDIT THIS
--version {version} \ # <- EDIT THIS
--namespace cert-manager-system \
> cert-manager-approver-policy.yamlkubectl create namespace cert-manager-system
kubectl label namespace cert-manager-system \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/warn=baseline \
pod-security.kubernetes.io/audit=restricted
kubectl apply --namespace cert-manager-system -f cert-manager-crds.yaml
kubectl apply -f cert-manager.yaml
kubectl apply -f cert-manager-approver-policy.yamlwatch kubectl get pods -n cert-manager-systemcrane ls docker.io/envoyproxy/gateway-crds-helm | tailhelm show values oci://docker.io/envoyproxy/gateway-crds-helm --version {version} > envoy-gateway-values.yaml # <- EDIT THISservice:
# (continuation of envoy-gateway-values.yaml; the `service:` key precedes)
  type: "ClusterIP"
deployment:
  replicas: 3
crds:
  gatewayAPI:
    enabled: true
    channel: standard
  envoyGateway:
    enabled: true
config:
  envoyGateway:
    extensionApis:
      enableBackend: true
# --- end envoy-gateway-values.yaml ---

# EDIT the {version} placeholders (no comments after trailing backslashes).
helm template \
  envoy-gateway oci://docker.io/envoyproxy/gateway-crds-helm \
  --kube-version {version} \
  --version {version} \
  --values envoy-gateway-values.yaml \
  > envoy-gateway-crds.yaml

helm template \
  envoy-gateway oci://docker.io/envoyproxy/gateway-helm \
  --kube-version {version} \
  --version {version} \
  --namespace envoy-gateway-system \
  --values envoy-gateway-values.yaml \
  --skip-crds \
  > envoy-gateway.yaml

kubectl create namespace envoy-gateway-system
kubectl label namespace envoy-gateway-system \
  pod-security.kubernetes.io/enforce=privileged \
  pod-security.kubernetes.io/warn=privileged \
  pod-security.kubernetes.io/audit=privileged --overwrite
kubectl apply --server-side -f envoy-gateway-crds.yaml
kubectl apply --server-side -f envoy-gateway.yaml
watch kubectl get pods -n envoy-gateway-system

# Important: it is better to comment out the HTTPS listener of a Gateway until
# its certificate has been created.
Important: create a Cloudflare API token under User Profile > API Tokens > API Tokens with:
- Permissions:
  - Zone - DNS - Edit
  - Zone - Zone - Read
- Zone Resources:
  - Include - All Zones
# --- cloudflare-dns-issuer.yaml ---
apiVersion: v1
kind: Secret
metadata:
  name: cloudflare-api-token-secret
  namespace: envoy-gateway-system
type: Opaque
stringData:
  api-token: {token} # <- EDIT THIS
---
# ACME issuer solving DNS-01 challenges through Cloudflare.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: cloudflare-dns-issuer
  namespace: envoy-gateway-system
spec:
  acme:
    email: {email} # <- EDIT THIS
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: issuer-account-key-secret
    solvers:
      - dns01:
          cloudflare:
            email: {email} # <- EDIT THIS
            apiTokenSecretRef:
              name: cloudflare-api-token-secret
              key: api-token
# --- end cloudflare-dns-issuer.yaml ---

kubectl apply -f cloudflare-dns-issuer.yaml

# --- envoy-gateway-default.yaml ---
apiVersion: gateway.envoyproxy.io/v1alpha1
kind: EnvoyProxy
metadata:
  name: envoy-gateway-config
  namespace: envoy-gateway-system
spec:
  provider:
    type: Kubernetes
    kubernetes:
      envoyService:
        type: ClusterIP
      envoyDeployment:
        pod:
          annotations:
            # HAProxy in front sends PROXY protocol; Envoy must accept it.
            gateway.envoyproxy.io/enable-proxy-protocol: "true"
---
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
  name: envoy-gateway-class
spec:
  controllerName: gateway.envoyproxy.io/gatewayclass-controller
  parametersRef:
    group: gateway.envoyproxy.io
    kind: EnvoyProxy
    name: envoy-gateway-config
    namespace: envoy-gateway-system
---
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: envoy-gateway
  namespace: envoy-gateway-system
spec:
  gatewayClassName: envoy-gateway-class
  listeners:
    - name: domain-http
      protocol: HTTP
      port: 80
      hostname: "domain" # <- EDIT THIS
      allowedRoutes:
        namespaces:
          from: Same
    - name: domain-https
      protocol: HTTPS
      port: 443
      hostname: "domain" # <- EDIT THIS
      tls:
        mode: Terminate
        certificateRefs:
          - kind: Secret
            name: domain-tls
      allowedRoutes:
        namespaces:
          from: Same
---
# Stable ClusterIP in front of the generated Envoy pods, selected via the
# owning-gateway labels Envoy Gateway puts on them.
apiVersion: v1
kind: Service
metadata:
  name: envoy-gateway-static
  namespace: envoy-gateway-system
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 80
      targetPort: 10080
      protocol: TCP
    - name: https
      port: 443
      targetPort: 10443
      protocol: TCP
    - name: ssh
      port: 22
      targetPort: 10022
      protocol: TCP
  selector:
    gateway.envoyproxy.io/owning-gateway-name: envoy-gateway
    gateway.envoyproxy.io/owning-gateway-namespace: envoy-gateway-system
# --- end envoy-gateway-default.yaml ---

kubectl apply -f envoy-gateway-default.yaml

# --- envoy-gateway-haproxy.yaml ---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: envoy-gateway-haproxy
  namespace: envoy-gateway-system
spec:
  selector:
    matchLabels:
      app: envoy-gateway-haproxy
  template:
    metadata:
      labels:
        app: envoy-gateway-haproxy
    spec:
      # Run only on edge nodes, tolerating their taint.
      nodeSelector:
        node-role.kubernetes.io/edge: ""
      tolerations:
        - key: "node.kubernetes.io/edge"
          operator: "Exists"
          effect: "NoSchedule"
      containers:
        - name: haproxy
          image: haproxy:3.3-alpine
          ports:
            # Port names must be unique within a container; the original named
            # all three "tcp", which fails API validation.
            - name: http
              containerPort: 80
              hostPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              hostPort: 443
              protocol: TCP
            - name: ssh
              containerPort: 22
              hostPort: 22
              protocol: TCP
          volumeMounts:
            - name: envoy-gateway-haproxy-config
              mountPath: /usr/local/etc/haproxy/haproxy.cfg
              subPath: haproxy.cfg
      volumes:
        - name: envoy-gateway-haproxy-config
          configMap:
            name: envoy-gateway-haproxy-config
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: envoy-gateway-haproxy-config
  namespace: envoy-gateway-system
data:
  # TCP passthrough to the static Envoy service; send-proxy-v2 preserves the
  # client IP via PROXY protocol, which Envoy is configured to accept.
  haproxy.cfg: |
    global
      log stdout format raw local0
    defaults
      log global
      mode tcp
      option tcplog
      timeout connect 10s
      timeout client 1m
      timeout server 1m
    resolvers k8s_dns
      parse-resolv-conf
      hold valid 10s
    frontend http_front
      bind *:80
      mode tcp
      default_backend envoy_gateway_http
    backend envoy_gateway_http
      mode tcp
      server envoy envoy-gateway-static.envoy-gateway-system.svc.cluster.local:80 check resolvers k8s_dns init-addr none send-proxy-v2
    frontend https_front
      bind *:443
      mode tcp
      default_backend envoy_gateway_https
    backend envoy_gateway_https
      mode tcp
      server envoy envoy-gateway-static.envoy-gateway-system.svc.cluster.local:443 check resolvers k8s_dns init-addr none send-proxy-v2
    frontend ssh_front
      bind *:22
      mode tcp
      timeout client 2h
      default_backend envoy_gateway_ssh
    backend envoy_gateway_ssh
      mode tcp
      timeout server 2h
      server envoy envoy-gateway-static.envoy-gateway-system.svc.cluster.local:22 check resolvers k8s_dns init-addr none send-proxy-v2
# --- end envoy-gateway-haproxy.yaml ---

kubectl apply -f envoy-gateway-haproxy.yaml