Conversation

@renovate renovate bot commented Jun 28, 2025

This PR contains the following updates:

| Package       | Type              | Update | Change         |
|---------------|-------------------|--------|----------------|
| flux (source) | required_provider | patch  | 1.6.2 -> 1.6.3 |

Release Notes

fluxcd/terraform-provider-flux (flux)

v1.6.3

Compare Source

Release date: 2025-06-27

This release includes flux2 v2.6.3.
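For reference, a patch bump like this typically changes only the pinned version constraint in the `required_providers` block; a minimal sketch (the exact constraint style and file layout used in this repository are assumptions):

```hcl
terraform {
  required_providers {
    flux = {
      source  = "fluxcd/flux"
      # Renovate updates the pinned patch version from 1.6.2 to 1.6.3
      version = "1.6.3"
    }
  }
}
```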


Configuration

📅 Schedule: Branch creation - "every weekend" (UTC), Automerge - At any time (no schedule defined).

🚦 Automerge: Enabled.

Rebasing: Whenever PR is behind base branch, or you tick the rebase/retry checkbox.

🔕 Ignore: Close this PR and you won't be reminded about this update again.


  • If you want to rebase/retry this PR, check this box

This PR was generated by Mend Renovate. View the repository job log.

@github-actions


Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
  + create
 <= read (data resources)

Terraform will perform the following actions:

  # cloudflare_record.dev_ipv4["talos-v1-10-3-controlplane-talos-v1-10-3-controlplane-1"] will be created
  + resource "cloudflare_record" "dev_ipv4" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "dev.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 3600
      + type            = "A"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # cloudflare_record.dev_ipv4["talos-v1-10-3-controlplane-talos-v1-10-3-controlplane-2"] will be created
  + resource "cloudflare_record" "dev_ipv4" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "dev.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 3600
      + type            = "A"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # cloudflare_record.dev_ipv6["talos-v1-10-3-controlplane-talos-v1-10-3-controlplane-1"] will be created
  + resource "cloudflare_record" "dev_ipv6" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "dev.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 3600
      + type            = "AAAA"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # cloudflare_record.dev_ipv6["talos-v1-10-3-controlplane-talos-v1-10-3-controlplane-2"] will be created
  + resource "cloudflare_record" "dev_ipv6" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "dev.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 3600
      + type            = "AAAA"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # module.k8s.module.cluster.flux_bootstrap_git.hydra will be created
  + resource "flux_bootstrap_git" "hydra" {
      + cluster_domain       = "cluster.local"
      + components           = [
          + "helm-controller",
          + "kustomize-controller",
          + "notification-controller",
          + "source-controller",
        ]
      + components_extra     = [
          + "image-automation-controller",
          + "image-reflector-controller",
        ]
      + delete_git_manifests = false
      + embedded_manifests   = false
      + id                   = (known after apply)
      + interval             = "1m0s"
      + keep_namespace       = false
      + log_level            = "info"
      + namespace            = "flux-system"
      + network_policy       = true
      + path                 = "kubernetes/clusters/hydra"
      + registry             = "ghcr.io/fluxcd"
      + repository_files     = (known after apply)
      + secret_name          = "flux-system"
      + timeouts             = {
          + create = "10m"
        }
      + version              = "v2.5.1"
      + watch_all_namespaces = true
    }

  # module.k8s.module.cluster.helm_release.cilium will be created
  + resource "helm_release" "cilium" {
      + atomic                     = false
      + chart                      = "cilium"
      + cleanup_on_fail            = false
      + create_namespace           = false
      + dependency_update          = false
      + disable_crd_hooks          = false
      + disable_openapi_validation = false
      + disable_webhooks           = false
      + force_update               = false
      + id                         = (known after apply)
      + lint                       = false
      + manifest                   = (known after apply)
      + max_history                = 0
      + metadata                   = (known after apply)
      + name                       = "cilium"
      + namespace                  = "kube-system"
      + pass_credentials           = false
      + recreate_pods              = false
      + render_subchart_notes      = true
      + replace                    = false
      + repository                 = "https://helm.cilium.io"
      + reset_values               = false
      + reuse_values               = false
      + skip_crds                  = false
      + status                     = "deployed"
      + timeout                    = 300
      + values                     = [
          + <<-EOT
                # Values documented at https://artifacthub.io/packages/helm/cilium/cilium/1.17.4
                
                # namespaceOverride: cilium-system # default is kube-system
                
                # Docs for cilium on talos at https://www.talos.dev/v1.10/kubernetes-guides/network/deploying-cilium/#with-kube-proxy
                ipam:
                  mode: kubernetes
                kubeProxyReplacement: true
                securityContext:
                  capabilities:
                    ciliumAgent:
                      - CHOWN
                      - KILL
                      - NET_ADMIN
                      - NET_RAW
                      - IPC_LOCK
                      # - SYS_MODULE # "As Talos does not allow loading Kernel modules by Kubernetes workloads, SYS_MODULE needs to be dropped from the Cilium default capability list."
                      - SYS_ADMIN
                      - SYS_RESOURCE
                      - DAC_OVERRIDE
                      - FOWNER
                      - SETGID
                      - SETUID
                    cleanCiliumState:
                      - NET_ADMIN
                      - SYS_ADMIN
                      - SYS_RESOURCE
                cgroup:
                  autoMount:
                    enabled: false
                  hostRoot: /sys/fs/cgroup
                k8sServiceHost: localhost
                k8sServicePort: 7445
                # ---
                # Enabling IPv6
                # TODO: Enable this once we have IPv6 support in the cluster (talos)
                # More info at https://docs.cilium.io/en/stable/helm-reference/
                # ipv6:
                #   enabled: true
                # ---
                encryption:
                  enabled: true # This enables encryption for pod-to-pod traffic
                  type: wireguard
                  # Node Encryption is not useful to add, since "Cilium automatically disables node-to-node encryption from and to Kubernetes control-plane nodes"
                  # More info at https://docs.cilium.io/en/stable/security/network/encryption-wireguard/#node-to-node-encryption-beta
                  # nodeEncryption: true
                # ---
                # Enabling gatewayAPI
                # Using hostNetwork mode removes the need for separate loadbalancer
                # More info at https://docs.cilium.io/en/stable/network/servicemesh/gateway-api/gateway-api/#host-network-mode
                gatewayAPI:
                  enabled: true
                  hostNetwork:
                    enabled: true
                  # ALPN will attempt HTTP/2, then HTTP 1.1.
                  # Note that this will also enable appProtocol support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`.
                  enableAlpn: true
                  enableAppProtocol: true
                # Enabling dedicated envoy daemonset for the gateway listener
                # The additional capabilities are required for gateways to listen on privileged ports
                # More info at https://docs.cilium.io/en/stable/network/servicemesh/gateway-api/gateway-api/#bind-to-privileged-port
                envoy:
                  enabled: true
                  securityContext:
                    capabilities:
                      keepCapNetBindService: true
                      envoy:
                        - NET_ADMIN
                        - SYS_ADMIN
                        - NET_BIND_SERVICE
            EOT,
        ]
      + verify                     = false
      + version                    = "1.17.4"
      + wait                       = true
      + wait_for_jobs              = true
    }

  # module.k8s.module.cluster.kubernetes_namespace.flux_system will be created
  + resource "kubernetes_namespace" "flux_system" {
      + id                               = (known after apply)
      + wait_for_default_service_account = false

      + metadata {
          + generation       = (known after apply)
          + name             = "flux-system"
          + resource_version = (known after apply)
          + uid              = (known after apply)
        }
    }

  # module.k8s.module.cluster.kubernetes_secret.sops_age will be created
  + resource "kubernetes_secret" "sops_age" {
      + data                           = (sensitive value)
      + id                             = (known after apply)
      + type                           = "Opaque"
      + wait_for_service_account_token = true

      + metadata {
          + generation       = (known after apply)
          + name             = "sops-age"
          + namespace        = "flux-system"
          + resource_version = (known after apply)
          + uid              = (known after apply)
        }
    }

  # module.k8s.module.cluster.talos_cluster_kubeconfig.kubeconfig will be created
  + resource "talos_cluster_kubeconfig" "kubeconfig" {
      + certificate_renewal_duration    = "720h"
      + client_configuration            = (sensitive value)
      + endpoint                        = (known after apply)
      + id                              = (known after apply)
      + kubeconfig_raw                  = (sensitive value)
      + kubernetes_client_configuration = (known after apply)
      + node                            = (known after apply)
    }

  # module.k8s.module.cluster.talos_machine_bootstrap.main will be created
  + resource "talos_machine_bootstrap" "main" {
      + client_configuration = (sensitive value)
      + endpoint             = (known after apply)
      + id                   = (known after apply)
      + node                 = (known after apply)
    }

  # module.k8s.module.cluster.time_sleep.wait_for_talos_bootstrap will be created
  + resource "time_sleep" "wait_for_talos_bootstrap" {
      + create_duration = "30s"
      + id              = (known after apply)
    }

  # module.k8s.module.config.hcloud_network.cluster_network will be created
  + resource "hcloud_network" "cluster_network" {
      + delete_protection        = false
      + expose_routes_to_vswitch = false
      + id                       = (known after apply)
      + ip_range                 = "10.0.0.0/8"
      + name                     = "hydra-network"
    }

  # module.k8s.module.config.hcloud_network_subnet.cluster_network_subnet will be created
  + resource "hcloud_network_subnet" "cluster_network_subnet" {
      + gateway      = (known after apply)
      + id           = (known after apply)
      + ip_range     = "10.1.0.0/16"
      + network_id   = (known after apply)
      + network_zone = "eu-central"
      + type         = "cloud"
    }

  # module.k8s.module.config.talos_machine_secrets.main will be created
  + resource "talos_machine_secrets" "main" {
      + client_configuration = (known after apply)
      + id                   = (known after apply)
      + machine_secrets      = (known after apply)
      + talos_version        = "v1.10"
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].data.talos_client_configuration.main will be read during apply
  # (config refers to values not yet known)
 <= data "talos_client_configuration" "main" {
      + client_configuration = (sensitive value)
      + cluster_name         = "hydra"
      + endpoints            = [
          + (known after apply),
          + (known after apply),
        ]
      + id                   = (known after apply)
      + nodes                = [
          + (known after apply),
          + (known after apply),
        ]
      + talos_config         = (sensitive value)
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].data.talos_machine_configuration.main will be read during apply
  # (config refers to values not yet known)
 <= data "talos_machine_configuration" "main" {
      + cluster_endpoint      = "https://hydra.k8s.thetillhoff.de:6443"
      + cluster_name          = "hydra"
      + config_patches        = [
          + <<-EOT
                machine:
                
                # Enable Longhorn dependencies
                # More info at https://longhorn.io/docs/1.8.1/advanced-resources/os-distro-specific/talos-linux-support/#v2-data-engine
                  sysctls:
                    vm.nr_hugepages: "1024"
                  kernel:
                    modules:
                      - name: nvme_tcp
                      - name: vfio_pci
                
                # Enable the Talos KubeSpan feature
                # More info at https://www.talos.dev/v1.10/talos-guides/network/kubespan/#enabling
                  network:
                    kubespan:
                      enabled: true
                
                # For now, IPv6 isn't properly supported by talos, not even in dualstack mode
                # # Enable DHCPv6
                #     interfaces:
                #     - interface: eth0 / enp0s1 # Mutually exclusive with deviceSelector
                #       deviceSelector: # Mutually exclusive with interface
                #         hardwareAddr: "*"
                #         driver: "*"
                #         physical: true # Select only physical devices
                #       dhcp: true
                #       dhcpOptions:
                #         ipv4: true
                #         ipv6: true
                
                  # For now, this isn't needed, as using the nodes as endpoints works just fine
                  # # This makes nodes reachable via the same name as the k8s cluster
                  # # More info at https://www.talos.dev/v1.10/reference/configuration/v1alpha1/config/#Config.machine
                  # certSANs:
                  #   - "hydra.k8s.thetillhoff.de"
                
                  # The following config regarding node taints leads to non-running kube-schedulers, kube-apiservers & core-dns pods at cluster creation
                  # nodeTaints:
                  #   # Cilium needs this taint to automatically restart the pods on the affected nodes after cilium is running on it
                  #   # More info at https://docs.cilium.io/en/stable/installation/taints/ & https://docs.cilium.io/en/stable/installation/k8s-install-helm/#restart-unmanaged-pods
                  #   node.cilium.io/agent-not-ready: true:NoExecute
                
                # Certificate rotation disabled as it needs manual approval of new certs
                # More info at https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#certificate-rotation
                #   kubelet:
                #     extraArgs:
                #       rotate-server-certificates: true
                
                # Longhorn data path mounts
                # More info at https://longhorn.io/docs/1.8.1/advanced-resources/os-distro-specific/talos-linux-support/#data-path-mounts
                  kubelet:
                    extraMounts:
                      - destination: /var/lib/longhorn
                        type: bind
                        source: /var/lib/longhorn
                        options:
                          - bind
                          - rshared
                          - rw
                
                # Disk encryption only makes sense with tpm / vtpm, which is not supported by hcloud
                # More info at https://docs.hetzner.com/cloud/servers/faq#do-the-cloud-servers-support-vtpm-or-tpm
                #   systemDiskEncryption:
                #     ephemeral:
                #       provider: luks2
                #       keys:
                #         - nodeID: {}
                #           slot: 0
                #     state:
                #       provider: luks2
                #       keys:
                #         - nodeID: {}
                #           slot: 0
                
                cluster:
                  network:
                    # Don't use podSubnets & serviceSubnets from this section (to enable ipv6)! Instead configure them on the kubelet option in the machine section
                    # More info at https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration
                    # And https://www.talos.dev/v1.10/reference/configuration/v1alpha1/config/#Config.machine.kubelet
                    # podSubnets:
                    #   - 10.244.0.0/16
                    #   - fd00:10:244::/116
                    # serviceSubnets:
                    #   - 10.96.0.0/12
                    #   - fd00:10:96::/112
                    cni:
                      name: none # Disable default CNI
                  proxy:
                    disabled: true # Disable default proxy, as cilium brings its own
                  allowSchedulingOnControlPlanes: true
                  discovery:
                    enabled: true
            EOT,
        ]
      + docs                  = false
      + examples              = false
      + id                    = (known after apply)
      + kubernetes_version    = "1.33.1"
      + machine_configuration = (sensitive value)
      + machine_secrets       = (sensitive value)
      + machine_type          = "controlplane"
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].cloudflare_record.nodes_ipv4["talos-v1-10-3-controlplane-1"] will be created
  + resource "cloudflare_record" "nodes_ipv4" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "hydra.k8s.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 60
      + type            = "A"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].cloudflare_record.nodes_ipv4["talos-v1-10-3-controlplane-2"] will be created
  + resource "cloudflare_record" "nodes_ipv4" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "hydra.k8s.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 60
      + type            = "A"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].cloudflare_record.nodes_ipv6["talos-v1-10-3-controlplane-1"] will be created
  + resource "cloudflare_record" "nodes_ipv6" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "hydra.k8s.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 60
      + type            = "AAAA"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].cloudflare_record.nodes_ipv6["talos-v1-10-3-controlplane-2"] will be created
  + resource "cloudflare_record" "nodes_ipv6" {
      + allow_overwrite = false
      + content         = (known after apply)
      + created_on      = (known after apply)
      + hostname        = (known after apply)
      + id              = (known after apply)
      + metadata        = (known after apply)
      + modified_on     = (known after apply)
      + name            = "hydra.k8s.thetillhoff.de"
      + proxiable       = (known after apply)
      + proxied         = false
      + ttl             = 60
      + type            = "AAAA"
      + value           = (known after apply)
      + zone_id         = "94d9f474ce48a61513a68744b663f5e5"
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].hcloud_server.nodes["talos-v1-10-3-controlplane-1"] will be created
  + resource "hcloud_server" "nodes" {
      + allow_deprecated_images    = false
      + backup_window              = (known after apply)
      + backups                    = false
      + datacenter                 = (known after apply)
      + delete_protection          = false
      + firewall_ids               = (known after apply)
      + id                         = (known after apply)
      + ignore_remote_firewall_ids = false
      + image                      = "241435885"
      + ipv4_address               = (known after apply)
      + ipv6_address               = (known after apply)
      + ipv6_network               = (known after apply)
      + keep_disk                  = false
      + location                   = "nbg1"
      + name                       = "talos-v1-10-3-controlplane-1"
      + primary_disk_size          = (known after apply)
      + rebuild_protection         = false
      + server_type                = "cax21"
      + shutdown_before_deletion   = true
      + status                     = (known after apply)

      + public_net {
          + ipv4         = (known after apply)
          + ipv4_enabled = true
          + ipv6         = (known after apply)
          + ipv6_enabled = true
        }
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].hcloud_server.nodes["talos-v1-10-3-controlplane-2"] will be created
  + resource "hcloud_server" "nodes" {
      + allow_deprecated_images    = false
      + backup_window              = (known after apply)
      + backups                    = false
      + datacenter                 = (known after apply)
      + delete_protection          = false
      + firewall_ids               = (known after apply)
      + id                         = (known after apply)
      + ignore_remote_firewall_ids = false
      + image                      = "241435885"
      + ipv4_address               = (known after apply)
      + ipv6_address               = (known after apply)
      + ipv6_network               = (known after apply)
      + keep_disk                  = false
      + location                   = "nbg1"
      + name                       = "talos-v1-10-3-controlplane-2"
      + primary_disk_size          = (known after apply)
      + rebuild_protection         = false
      + server_type                = "cax21"
      + shutdown_before_deletion   = true
      + status                     = (known after apply)

      + public_net {
          + ipv4         = (known after apply)
          + ipv4_enabled = true
          + ipv6         = (known after apply)
          + ipv6_enabled = true
        }
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].talos_machine_configuration_apply.main["talos-v1-10-3-controlplane-1"] will be created
  + resource "talos_machine_configuration_apply" "main" {
      + apply_mode                  = "auto"
      + client_configuration        = (sensitive value)
      + config_patches              = [
          + <<-EOT
                machine:
                
                # Enable Longhorn dependencies
                # More info at https://longhorn.io/docs/1.8.1/advanced-resources/os-distro-specific/talos-linux-support/#v2-data-engine
                  sysctls:
                    vm.nr_hugepages: "1024"
                  kernel:
                    modules:
                      - name: nvme_tcp
                      - name: vfio_pci
                
                # Enable the Talos KubeSpan feature
                # More info at https://www.talos.dev/v1.10/talos-guides/network/kubespan/#enabling
                  network:
                    kubespan:
                      enabled: true
                
                # For now, IPv6 isn't properly supported by talos, not even in dualstack mode
                # # Enable DHCPv6
                #     interfaces:
                #     - interface: eth0 / enp0s1 # Mutually exclusive with deviceSelector
                #       deviceSelector: # Mutually exclusive with interface
                #         hardwareAddr: "*"
                #         driver: "*"
                #         physical: true # Select only physical devices
                #       dhcp: true
                #       dhcpOptions:
                #         ipv4: true
                #         ipv6: true
                
                  # For now, this isn't needed, as using the nodes as endpoints works just fine
                  # # This makes nodes reachable via the same name as the k8s cluster
                  # # More info at https://www.talos.dev/v1.10/reference/configuration/v1alpha1/config/#Config.machine
                  # certSANs:
                  #   - "hydra.k8s.thetillhoff.de"
                
                  # The following config regarding node taints leads to non-running kube-schedulers, kube-apiservers & core-dns pods at cluster creation
                  # nodeTaints:
                  #   # Cilium needs this taint to automatically restart the pods on the affected nodes after cilium is running on it
                  #   # More info at https://docs.cilium.io/en/stable/installation/taints/ & https://docs.cilium.io/en/stable/installation/k8s-install-helm/#restart-unmanaged-pods
                  #   node.cilium.io/agent-not-ready: true:NoExecute
                
                # Certificate rotation disabled as it needs manual approval of new certs
                # More info at https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#certificate-rotation
                #   kubelet:
                #     extraArgs:
                #       rotate-server-certificates: true
                
                # Longhorn data path mounts
                # More info at https://longhorn.io/docs/1.8.1/advanced-resources/os-distro-specific/talos-linux-support/#data-path-mounts
                  kubelet:
                    extraMounts:
                      - destination: /var/lib/longhorn
                        type: bind
                        source: /var/lib/longhorn
                        options:
                          - bind
                          - rshared
                          - rw
                
                # Disk encryption only makes sense with tpm / vtpm, which is not supported by hcloud
                # More info at https://docs.hetzner.com/cloud/servers/faq#do-the-cloud-servers-support-vtpm-or-tpm
                #   systemDiskEncryption:
                #     ephemeral:
                #       provider: luks2
                #       keys:
                #         - nodeID: {}
                #           slot: 0
                #     state:
                #       provider: luks2
                #       keys:
                #         - nodeID: {}
                #           slot: 0
                
                cluster:
                  network:
                    # Don't use podSubnets & serviceSubnets from this section (to enable ipv6)! Instead configure them on the kubelet option in the machine section
                    # More info at https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration
                    # And https://www.talos.dev/v1.10/reference/configuration/v1alpha1/config/#Config.machine.kubelet
                    # podSubnets:
                    #   - 10.244.0.0/16
                    #   - fd00:10:244::/116
                    # serviceSubnets:
                    #   - 10.96.0.0/12
                    #   - fd00:10:96::/112
                    cni:
                      name: none # Disable default CNI
                  proxy:
                    disabled: true # Disable default proxy, as cilium brings its own
                  allowSchedulingOnControlPlanes: true
                  discovery:
                    enabled: true
            EOT,
        ]
      + endpoint                    = (known after apply)
      + id                          = (known after apply)
      + machine_configuration       = (sensitive value)
      + machine_configuration_input = (sensitive value)
      + node                        = (known after apply)
      + timeouts                    = {
          + create = "1m"
        }
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].talos_machine_configuration_apply.main["talos-v1-10-3-controlplane-2"] will be created
  + resource "talos_machine_configuration_apply" "main" {
      + apply_mode                  = "auto"
      + client_configuration        = (sensitive value)
      + config_patches              = [
          + <<-EOT
                machine:
                
                # Enable Longhorn dependencies
                # More info at https://longhorn.io/docs/1.8.1/advanced-resources/os-distro-specific/talos-linux-support/#v2-data-engine
                  sysctls:
                    vm.nr_hugepages: "1024"
                  kernel:
                    modules:
                      - name: nvme_tcp
                      - name: vfio_pci
                
                # Enable the Talos KubeSpan feature
                # More info at https://www.talos.dev/v1.10/talos-guides/network/kubespan/#enabling
                  network:
                    kubespan:
                      enabled: true
                
                # For now, IPv6 isn't properly supported by talos, not even in dualstack mode
                # # Enable DHCPv6
                #     interfaces:
                #     - interface: eth0 / enp0s1 # Mutually exclusive with deviceSelector
                #       deviceSelector: # Mutually exclusive with interface
                #         hardwareAddr: "*"
                #         driver: "*"
                #         physical: true # Select only physical devices
                #       dhcp: true
                #       dhcpOptions:
                #         ipv4: true
                #         ipv6: true
                
                  # For now, this isn't needed, as using the nodes as endpoints works just fine
                  # # This makes nodes reachable via the same name as the k8s cluster
                  # # More info at https://www.talos.dev/v1.10/reference/configuration/v1alpha1/config/#Config.machine
                  # certSANs:
                  #   - "hydra.k8s.thetillhoff.de"
                
                  # The following config regarding node taints leads to non-running kube-schedulers, kube-apiservers & core-dns pods at cluster creation
                  # nodeTaints:
                  #   # Cilium needs this taint to automatically restart the pods on the affected nodes after cilium is running on it
                  #   # More info at https://docs.cilium.io/en/stable/installation/taints/ & https://docs.cilium.io/en/stable/installation/k8s-install-helm/#restart-unmanaged-pods
                  #   node.cilium.io/agent-not-ready: true:NoExecute
                
                # Certificate rotation disabled as it needs manual approval of new certs
                # More info at https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#certificate-rotation
                #   kubelet:
                #     extraArgs:
                #       rotate-server-certificates: true
                
                # Longhorn data path mounts
                # More info at https://longhorn.io/docs/1.8.1/advanced-resources/os-distro-specific/talos-linux-support/#data-path-mounts
                  kubelet:
                    extraMounts:
                      - destination: /var/lib/longhorn
                        type: bind
                        source: /var/lib/longhorn
                        options:
                          - bind
                          - rshared
                          - rw
                
                # Disk encryption only makes sense with tpm / vtpm, which is not supported by hcloud
                # More info at https://docs.hetzner.com/cloud/servers/faq#do-the-cloud-servers-support-vtpm-or-tpm
                #   systemDiskEncryption:
                #     ephemeral:
                #       provider: luks2
                #       keys:
                #         - nodeID: {}
                #           slot: 0
                #     state:
                #       provider: luks2
                #       keys:
                #         - nodeID: {}
                #           slot: 0
                
                cluster:
                  network:
                    # Don't use podSubnets & serviceSubnets from this section (to enable ipv6)! Instead configure them on the kubelet option in the machine section
                    # More info at https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration
                    # And https://www.talos.dev/v1.10/reference/configuration/v1alpha1/config/#Config.machine.kubelet
                    # podSubnets:
                    #   - 10.244.0.0/16
                    #   - fd00:10:244::/116
                    # serviceSubnets:
                    #   - 10.96.0.0/12
                    #   - fd00:10:96::/112
                    cni:
                      name: none # Disable default CNI
                  proxy:
                    disabled: true # Disable default proxy, as cilium brings its own
                  allowSchedulingOnControlPlanes: true
                  discovery:
                    enabled: true
            EOT,
        ]
      + endpoint                    = (known after apply)
      + id                          = (known after apply)
      + machine_configuration       = (sensitive value)
      + machine_configuration_input = (sensitive value)
      + node                        = (known after apply)
      + timeouts                    = {
          + create = "1m"
        }
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].time_sleep.wait_for_dns will be created
  + resource "time_sleep" "wait_for_dns" {
      + create_duration = "60s"
      + id              = (known after apply)
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].time_sleep.wait_for_talos_config_apply will be created
  + resource "time_sleep" "wait_for_talos_config_apply" {
      + create_duration = "5s"
      + id              = (known after apply)
    }

  # module.k8s.module.nodegroup["talos-v1-10-3-controlplane"].time_sleep.wait_for_vm_creation will be created
  + resource "time_sleep" "wait_for_vm_creation" {
      + create_duration = "10s"
      + id              = (known after apply)
    }

Plan: 25 to add, 0 to change, 0 to destroy.

Changes to Outputs:
  + kubeconfig  = (sensitive value)
  + talosconfig = (sensitive value)
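The bumped flux provider backs the `flux_bootstrap_git.hydra` resource planned above; a minimal sketch of a matching resource block, with attribute values taken from the plan output (provider configuration and the surrounding `module.k8s.module.cluster` wiring are omitted, and the selection of arguments shown is an assumption):

```hcl
resource "flux_bootstrap_git" "hydra" {
  # Attribute values mirror the planned resource above
  path             = "kubernetes/clusters/hydra"
  namespace        = "flux-system"
  version          = "v2.5.1"
  components_extra = [
    "image-automation-controller",
    "image-reflector-controller",
  ]

  timeouts = {
    create = "10m"
  }
}
```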

@renovate renovate bot merged commit 02206e8 into main Jun 28, 2025
2 checks passed
@renovate renovate bot deleted the renovate/flux-1.x branch June 28, 2025 04:57