From a883e4ad95ea7c6463cd04ce08c9146a55829930 Mon Sep 17 00:00:00 2001 From: Boris Litvinenko Date: Wed, 25 Dec 2024 17:26:27 +0300 Subject: [PATCH] add ipv6 only option --- docs/ansible/vars.md | 5 +++++ .../group_vars/k8s_cluster/k8s-cluster.yml | 9 ++++++--- .../templates/cri-dockerd.service.j2 | 2 +- .../control-plane/tasks/kubeadm-secondary.yml | 4 ++-- .../control-plane/tasks/kubeadm-setup.yml | 4 ++-- .../templates/kubeadm-config.v1beta3.yaml.j2 | 18 +++++++++--------- .../templates/kubeadm-config.v1beta4.yaml.j2 | 19 ++++++++++--------- roles/kubernetes/node/defaults/main.yml | 2 +- .../templates/loadbalancer/haproxy.cfg.j2 | 4 ++-- .../node/templates/loadbalancer/nginx.conf.j2 | 4 ++-- .../tasks/0080-system-configurations.yml | 2 +- .../preinstall/tasks/0090-etchosts.yml | 6 +++++- .../kubespray-defaults/defaults/main/main.yml | 9 ++++++--- roles/kubespray-defaults/tasks/main.yaml | 12 +++++++++++- roles/network_plugin/calico/tasks/check.yml | 2 +- roles/network_plugin/calico/tasks/install.yml | 8 ++++---- .../calico/templates/calico-config.yml.j2 | 6 +++++- .../calico/templates/calico-node.yml.j2 | 10 +++++++--- .../flannel/templates/cni-flannel.yml.j2 | 4 +++- .../kube-ovn/templates/cni-kube-ovn.yml.j2 | 18 +++++++++--------- .../kube-ovn/templates/cni-ovn.yml.j2 | 6 +++--- roles/reset/tasks/main.yml | 2 +- 22 files changed, 96 insertions(+), 60 deletions(-) diff --git a/docs/ansible/vars.md b/docs/ansible/vars.md index 0bda826f57b..4a71c04c324 100644 --- a/docs/ansible/vars.md +++ b/docs/ansible/vars.md @@ -43,6 +43,8 @@ Some variables of note include: * *ip6* - IPv6 address to use for binding services. (host var) If *enable_dual_stack_networks* is set to ``true`` and *ip6* is defined, kubelet's ``--node-ip`` and node's ``InternalIP`` will be the combination of *ip* and *ip6*. + If *enable_ipv6only_stack_networks* is set to ``true``, + kubelet's ``--node-ip`` and node's ``InternalIP`` will be use only *ip6*. 
* *loadbalancer_apiserver* - If defined, all hosts will connect to this address instead of localhost for kube_control_planes and kube_control_plane[0] for kube_nodes. See more details in the @@ -85,6 +87,8 @@ following default cluster parameters: * *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services. +* *enable_ipv6only_stack_networks* - Setting this to true will provision IPv6 only networking for pods and services. + * *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``. * *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``. @@ -155,6 +159,7 @@ and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``. ## Enabling Dual Stack (IPV4 + IPV6) networking If *enable_dual_stack_networks* is set to ``true``, Dual Stack networking will be enabled in the cluster. This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services. +Also you can use *enable_ipv6only_stack_networks* for ipv6 only networking. ## DNS variables diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index e6fc89bf0aa..0195a4943a7 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -100,19 +100,22 @@ kube_network_node_prefix: 24 # Configure Dual Stack networking (i.e. both IPv4 and IPv6) enable_dual_stack_networks: false +# Configure IPv6 only +enable_ipv6only_stack_networks: false + # Kubernetes internal network for IPv6 services, unused block of space. 
-# This is only used if enable_dual_stack_networks is set to true +# This is used if enable_dual_stack_networks or enable_ipv6only_stack_networks is set to true # This provides 4096 IPv6 IPs kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 # Internal network. When used, it will assign IPv6 addresses from this range to individual pods. # This network must not already be in your network infrastructure! -# This is only used if enable_dual_stack_networks is set to true. +# This is used if enable_dual_stack_networks or enable_ipv6only_stack_networks is set to true # This provides room for 256 nodes with 254 pods per node. kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 # IPv6 subnet size allocated to each for pods. -# This is only used if enable_dual_stack_networks is set to true +# This is used if enable_dual_stack_networks or enable_ipv6only_stack_networks is set to true # This provides room for 254 pods per node. kube_network_node_prefix_ipv6: 120 diff --git a/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 b/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 index df88c7dabde..79950fb2bb1 100644 --- a/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 +++ b/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 @@ -7,7 +7,7 @@ Requires=cri-dockerd.socket [Service] Type=notify -ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %} +ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ [kube_pods_subnet if not enable_ipv6only_stack_networks, 
kube_pods_subnet_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %} ExecReload=/bin/kill -s HUP $MAINPID TimeoutSec=0 diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml index 6dbf4df1bb6..a148e0cf074 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml @@ -43,8 +43,8 @@ - name: Wait for k8s apiserver wait_for: - host: "{{ kubeadm_discovery_address.split(':')[0] }}" - port: "{{ kubeadm_discovery_address.split(':')[1] }}" + host: "{{ kubeadm_discovery_address | regex_replace(':\\d+$', '') }}" + port: "{{ kubeadm_discovery_address.split(':')[-1] }}" timeout: 180 diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index ae7b7506fe5..dd13038d649 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -25,16 +25,16 @@ - name: Kubeadm | aggregate all SANs set_fact: - apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}" + apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_apiserver_ip + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}" vars: sans_base: - "kubernetes" - "kubernetes.default" - "kubernetes.default.svc" - "kubernetes.default.svc.{{ dns_domain }}" - - "{{ kube_apiserver_ip }}" - "localhost" - "127.0.0.1" + 
sans_apiserver_ip: "{{ [kube_apiserver_ip] if not enable_ipv6only_stack_networks else [] }}" sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}" sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}" sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}" diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 index 123a68ad579..7d29ff0c8ed 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 @@ -94,9 +94,9 @@ dns: imageTag: {{ coredns_image_tag }} networking: dnsDomain: {{ dns_domain }} - serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" + serviceSubnet: "{{ [kube_service_addresses if not enable_ipv6only_stack_networks, kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} - podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" + podSubnet: "{{ [kube_pods_subnet if not enable_ipv6only_stack_networks, kube_pods_subnet_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% endif %} {% if kubeadm_feature_gates %} featureGates: @@ -143,7 +143,7 @@ apiServer: etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}" {% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} - service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + 
kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" + service-cluster-ip-range: "{{ [kube_service_addresses if not enable_ipv6only_stack_networks, kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" profiling: "{{ kube_profiling }}" request-timeout: "{{ kube_apiserver_request_timeout }}" @@ -293,18 +293,18 @@ controllerManager: node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} node-monitor-period: {{ kube_controller_node_monitor_period }} {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} - cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" + cluster-cidr: "{{ [kube_pods_subnet if not enable_ipv6only_stack_networks, kube_pods_subnet_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% endif %} - service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" + service-cluster-ip-range: "{{ [kube_service_addresses if not enable_ipv6only_stack_networks, kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %} allocate-node-cidrs: "false" -{% else %} -{% if enable_dual_stack_networks %} +{% elif enable_ipv6only_stack_networks %} + node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}" +{% elif enable_dual_stack_networks %} node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}" node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}" {% else %} node-cidr-mask-size: "{{ kube_network_node_prefix }}" -{% endif %} {% endif %} 
profiling: "{{ kube_profiling }}" terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}" @@ -383,7 +383,7 @@ clientConnection: kubeconfig: {{ kube_proxy_client_kubeconfig }} qps: {{ kube_proxy_client_qps }} {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} -clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +clusterCIDR: "{{ [kube_pods_subnet if not enable_ipv6only_stack_networks, kube_pods_subnet_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% endif %} configSyncPeriod: {{ kube_proxy_config_sync_period }} conntrack: diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2 index be03b489f54..02b0fe6c1e7 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta4.yaml.j2 @@ -106,9 +106,9 @@ dns: imageTag: {{ coredns_image_tag }} networking: dnsDomain: {{ dns_domain }} - serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" + serviceSubnet: "{{ [kube_service_addresses if not enable_ipv6only_stack_networks, kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} - podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" + podSubnet: "{{ [kube_pods_subnet if not enable_ipv6only_stack_networks, kube_pods_subnet_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% endif %} {% if kubeadm_feature_gates %} featureGates: @@ -169,7 +169,7 
@@ apiServer: - name: service-node-port-range value: "{{ kube_apiserver_node_port_range }}" - name: service-cluster-ip-range - value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" + value: "{{ [kube_service_addresses if not enable_ipv6only_stack_networks, kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" - name: kubelet-preferred-address-types value: "{{ kubelet_preferred_address_types }}" - name: profiling @@ -351,15 +351,17 @@ controllerManager: value: "{{ kube_controller_node_monitor_period }}" {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} - name: cluster-cidr - value: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" + value: "{{ [kube_pods_subnet if not enable_ipv6only_stack_networks, kube_pods_subnet_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% endif %} - name: service-cluster-ip-range - value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" + value: "{{ [kube_service_addresses if not enable_ipv6only_stack_networks, kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %} - name: allocate-node-cidrs value: "false" -{% else %} -{% if enable_dual_stack_networks %} +{% elif enable_ipv6only_stack_networks %} + - name: node-cidr-mask-size-ipv6 + value: "{{ kube_network_node_prefix_ipv6 }}" +{% elif enable_dual_stack_networks %} - name: node-cidr-mask-size-ipv4 value: "{{ kube_network_node_prefix }}" - name: node-cidr-mask-size-ipv6 @@ -367,7 +369,6 @@ controllerManager: {% else %} - name: 
node-cidr-mask-size value: "{{ kube_network_node_prefix }}" -{% endif %} {% endif %} - name: profiling value: "{{ kube_profiling }}" @@ -478,7 +479,7 @@ clientConnection: kubeconfig: {{ kube_proxy_client_kubeconfig }} qps: {{ kube_proxy_client_qps }} {% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} -clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +clusterCIDR: "{{ [kube_pods_subnet if not enable_ipv6only_stack_networks, kube_pods_subnet_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" {% endif %} configSyncPeriod: {{ kube_proxy_config_sync_period }} conntrack: diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index c51e00f0e46..e2e6617a311 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -1,6 +1,6 @@ --- # advertised host IP for kubelet. This affects network plugin config. Take caution -kubelet_address: "{{ ip | default(fallback_ip) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}" +kubelet_address: "{{ [ip | default(fallback_ip) if not enable_ipv6only_stack_networks, ip6 | default(fallback_ip6) if (enable_dual_stack_networks or enable_ipv6only_stack_networks)] | reject('match', '^$') | join(',') }}{{ '' }}" # bind address for kubelet. 
Set to 0.0.0.0 to listen on all interfaces kubelet_bind_address: "{{ ip | default('0.0.0.0') }}" diff --git a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 index 1cd8b411499..f1618aefc89 100644 --- a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 +++ b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 @@ -22,7 +22,7 @@ defaults {% if loadbalancer_apiserver_healthcheck_port is defined -%} frontend healthz bind 0.0.0.0:{{ loadbalancer_apiserver_healthcheck_port }} - {% if enable_dual_stack_networks -%} + {% if enable_dual_stack_networks or enable_ipv6only_stack_networks -%} bind :::{{ loadbalancer_apiserver_healthcheck_port }} {% endif -%} mode http @@ -31,7 +31,7 @@ frontend healthz frontend kube_api_frontend bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} - {% if enable_dual_stack_networks -%} + {% if enable_dual_stack_networks or enable_ipv6only_stack_networks -%} bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; {% endif -%} mode tcp diff --git a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 index d6b5cce4ee7..ec1e7017496 100644 --- a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 +++ b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 @@ -20,7 +20,7 @@ stream { server { listen 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; - {% if enable_dual_stack_networks -%} + {% if enable_dual_stack_networks or enable_ipv6only_stack_networks -%} listen [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; {% endif -%} proxy_pass kube_apiserver; @@ -44,7 +44,7 @@ http { {% if loadbalancer_apiserver_healthcheck_port is defined -%} server { listen {{ loadbalancer_apiserver_healthcheck_port }}; - {% if enable_dual_stack_networks -%} + {% if enable_dual_stack_networks or 
enable_ipv6only_stack_networks -%} listen [::]:{{ loadbalancer_apiserver_healthcheck_port }}; {% endif -%} location /healthz { diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml index 8941a649a4c..43aee1f396f 100644 --- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml +++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml @@ -84,7 +84,7 @@ value: "1" state: present reload: true - when: enable_dual_stack_networks | bool + when: (enable_dual_stack_networks or enable_ipv6only_stack_networks) | bool - name: Check if we need to set fs.may_detach_mounts stat: diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml index 0b44d26adc6..cc8c64bf97f 100644 --- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml +++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml @@ -3,7 +3,11 @@ set_fact: etc_hosts_inventory_block: |- {% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%} - {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%} + {% if enable_ipv6only_stack_networks -%} + {{ hostvars[item]['access_ip_v6'] | default(hostvars[item]['ip6'] | default(hostvars[item]['ansible_default_ipv6']['address'])) }} + {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %} + + {% else %} {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }} {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ 
hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %} diff --git a/roles/kubespray-defaults/defaults/main/main.yml b/roles/kubespray-defaults/defaults/main/main.yml index 32a78545a3d..f557e422da6 100644 --- a/roles/kubespray-defaults/defaults/main/main.yml +++ b/roles/kubespray-defaults/defaults/main/main.yml @@ -227,6 +227,9 @@ kube_network_node_prefix: 24 # Configure Dual Stack networking (i.e. both IPv4 and IPv6) enable_dual_stack_networks: false +# Configure only IPv6 networking +enable_ipv6only_stack_networks: false + # Kubernetes internal network for IPv6 services, unused block of space. # This is only used if enable_dual_stack_networks is set to true # This provides 4096 IPv6 IPs @@ -247,7 +250,7 @@ kube_network_node_prefix_ipv6: 120 # listening on. # NOTE: loadbalancer_apiserver_localhost somewhat alters the final API enpdoint # access IP value (automatically evaluated below) -kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}" +kube_apiserver_ip: "{{ kube_service_addresses_ipv6 if enable_ipv6only_stack_networks else kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}" # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost # loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too. 
@@ -548,9 +551,9 @@ ssl_ca_dirs: |- # Vars for pointing to kubernetes api endpoints kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}" -kube_apiserver_address: "{{ ip | default(hostvars[inventory_hostname]['fallback_ip']) }}" +kube_apiserver_address: "{{ ip6 | default(hostvars[inventory_hostname]['fallback_ip6']) if enable_ipv6only_stack_networks else ip | default(hostvars[inventory_hostname]['fallback_ip']) }}" kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}" -first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(hostvars[groups['kube_control_plane'][0]]['fallback_ip'])) }}" +first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(hostvars[groups['kube_control_plane'][0]]['fallback_ip'])) if not enable_ipv6only_stack_networks else hostvars[groups['kube_control_plane'][0]]['access_ip_v6'] | default(hostvars[groups['kube_control_plane'][0]]['ip6'] | default(hostvars[groups['kube_control_plane'][0]]['fallback_ip6'])) }}" loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}" loadbalancer_apiserver_type: "nginx" # applied if only external loadbalancer_apiserver is defined, otherwise ignored diff --git a/roles/kubespray-defaults/tasks/main.yaml b/roles/kubespray-defaults/tasks/main.yaml index a26ce63a22d..6eaeae76c9f 100644 --- a/roles/kubespray-defaults/tasks/main.yaml +++ b/roles/kubespray-defaults/tasks/main.yaml @@ -16,7 +16,17 @@ - name: Set fallback_ip set_fact: fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}" - when: fallback_ip is not defined + when: fallback_ip6 is not defined + - name: Gather ansible_default_ipv6 + setup: + gather_subset: '!all,network' + filter: "ansible_default_ipv6" + when: ansible_default_ipv6 is not defined + ignore_unreachable: true + - 
name: Set fallback_ip6 + set_fact: + fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}" + when: fallback_ip6 is not defined - name: Set no_proxy import_tasks: no_proxy.yml diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml index aef34bb2cdf..2da31ed983b 100644 --- a/roles/network_plugin/calico/tasks/check.yml +++ b/roles/network_plugin/calico/tasks/check.yml @@ -192,6 +192,6 @@ - "calico_ipip_mode_ipv6 in ['Never']" msg: "Calico doesn't support ipip tunneling for the IPv6" when: - - enable_dual_stack_networks + - (enable_dual_stack_networks or enable_ipv6only_stack_networks) run_once: true delegate_to: "{{ groups['kube_control_plane'][0] }}" diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index 2d80b110cdf..c7cdf342f07 100644 --- a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -107,7 +107,7 @@ changed_when: false when: - inventory_hostname == groups['kube_control_plane'][0] - - enable_dual_stack_networks + - (enable_dual_stack_networks or enable_ipv6only_stack_networks) - name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined assert: @@ -117,7 +117,7 @@ - inventory_hostname == groups['kube_control_plane'][0] - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" - calico_pool_cidr_ipv6 is defined - - enable_dual_stack_networks + - (enable_dual_stack_networks or enable_ipv6only_stack_networks) - name: Calico | kdd specific configuration when: @@ -256,7 +256,7 @@ - name: Calico | Configure Calico IPv6 Pool when: - inventory_hostname == groups['kube_control_plane'][0] - - enable_dual_stack_networks | bool + - (enable_dual_stack_networks or enable_ipv6only_stack_networks) | bool block: - name: Calico | Get existing calico ipv6 network pool command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json" @@ -350,7 +350,7 
@@ {% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %} "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} , {% if calico_advertise_cluster_ips | default(false) %} - "serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %} + "serviceClusterIPs": [{{ ['{"cidr": "' + kube_service_addresses + '"}' if not enable_ipv6only_stack_networks else '', '{"cidr": "' + kube_service_addresses_ipv6 + '"}' if (enable_dual_stack_networks or enable_ipv6only_stack_networks) else ''] | reject('match', '^$') | join(',') }}],{% endif %} {% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %} "serviceExternalIPs": {{ _service_external_ips | default([]) }} } diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 index d949af1ec6b..b1158a5327c 100644 --- a/roles/network_plugin/calico/templates/calico-config.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -56,10 +56,14 @@ data: {% else %} "ipam": { "type": "calico-ipam", - {% if enable_dual_stack_networks %} + {% if enable_ipv6only_stack_networks %} + "assign_ipv6": "true" + {% elif enable_dual_stack_networks %} "assign_ipv6": "true", {% endif %} + {% if not enable_ipv6only_stack_networks %} "assign_ipv4": "true" + {% endif %} }, {% endif %} {% if calico_allow_ip_forwarding %} diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 index 10151cdb651..7aa87cf2db0 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -259,13 +259,15 @@ spec: # no effect. This should fall within `--cluster-cidr`.
# - name: CALICO_IPV4POOL_CIDR # value: "192.168.0.0/16" +{% if not enable_ipv6only_stack_networks %} - name: CALICO_IPV4POOL_IPIP value: "{{ calico_ipv4pool_ipip }}" +{% endif %} # Enable or Disable VXLAN on the default IP pool. - name: CALICO_IPV4POOL_VXLAN value: "Never" - name: FELIX_IPV6SUPPORT - value: "{{ enable_dual_stack_networks | default(false) }}" + value: "{{ (enable_dual_stack_networks or enable_ipv6only_stack_networks) | default(false) }}" # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN value: "{{ calico_loglevel }}" @@ -308,9 +310,11 @@ spec: - name: IP_AUTODETECTION_METHOD value: "can-reach=$(NODEIP)" {% endif %} +{% if not enable_ipv6only_stack_networks %} - name: IP value: "autodetect" -{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %} +{% endif %} +{% if calico_ip6_auto_method is defined and (enable_dual_stack_networks or enable_ipv6only_stack_networks) %} - name: IP6_AUTODETECTION_METHOD value: "{{ calico_ip6_auto_method }}" {% endif %} @@ -318,7 +322,7 @@ spec: - name: FELIX_MTUIFACEPATTERN value: "{{ calico_felix_mtu_iface_pattern }}" {% endif %} -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} - name: IP6 value: autodetect {% endif %} diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index 9c36d01ba5d..a45181e11cc 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -30,9 +30,11 @@ data: } net-conf.json: | { +{% if not enable_ipv6only_stack_networks %} "Network": "{{ kube_pods_subnet }}", "EnableIPv4": true, -{% if enable_dual_stack_networks %} +{% endif %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} "EnableIPv6": true, "IPv6Network": "{{ kube_pods_subnet_ipv6 }}", {% endif %} diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 
b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 index f4acdedac10..9f85e0db405 100644 --- a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 +++ b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 @@ -240,14 +240,14 @@ spec: imagePullPolicy: {{ k8s_image_pull_policy }} args: - /kube-ovn/start-controller.sh - - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{ '' }} + - --default-cidr={{ [kube_pods_subnet if not enable_ipv6only_stack_networks else '', kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) if (enable_dual_stack_networks or enable_ipv6only_stack_networks) else ''] | reject('match', '^$') | join(',') }}{{ '' }} - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }} - --default-gateway-check={{ kube_ovn_default_gateway_check | string }} - --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }} - --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }} - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }} - - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{ '' }} - - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }} + - --node-switch-cidr={{ [kube_ovn_node_switch_cidr if not enable_ipv6only_stack_networks else '', kube_ovn_node_switch_cidr_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks) else ''] | reject('match', '^$') | join(',') }}{{ '' }} + - --service-cluster-ip-range={{ [kube_service_addresses if not enable_ipv6only_stack_networks else '', kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks) else ''] | reject('match', '^$') | join(',') }}{{ '' }} - --network-type={{ kube_ovn_network_type }} -
--default-interface-name={{ kube_ovn_default_interface_name | default('') }} - --default-vlan-id={{ kube_ovn_default_vlan_id }} @@ -403,7 +403,7 @@ spec: args: - --enable-mirror={{ kube_ovn_traffic_mirror | lower }} - --encap-checksum={{ kube_ovn_encap_checksum | lower }} - - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }} + - --service-cluster-ip-range={{ [kube_service_addresses if not enable_ipv6only_stack_networks else '', kube_service_addresses_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks) else ''] | reject('match', '^$') | join(',') }}{{ '' }} - --iface={{ kube_ovn_iface | default('') }} - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }} - --network-type={{ kube_ovn_network_type }} @@ -588,7 +588,7 @@ spec: command: - /kube-ovn/kube-ovn-pinger args: - - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{ '' }} + - --external-address={{ [kube_ovn_external_address if not enable_ipv6only_stack_networks else '', kube_ovn_external_address_ipv6 if (enable_dual_stack_networks or enable_ipv6only_stack_networks) else ''] | reject('match', '^$') | join(',') }}{{ '' }} - --external-dns={{ kube_ovn_external_dns }} - --logtostderr=false - --alsologtostderr=true @@ -837,7 +837,7 @@ spec: - name: metrics port: 10661 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -852,7 +852,7 @@ metadata: labels: app: kube-ovn-pinger spec: -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -869,7 +869,7 @@ metadata: labels: app: kube-ovn-controller spec: -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %}
ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -886,7 +886,7 @@ metadata: labels: app: kube-ovn-cni spec: -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} ipFamilyPolicy: PreferDualStack {% endif %} selector: diff --git a/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 index 453ac60722d..956ce352288 100644 --- a/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 +++ b/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 @@ -260,7 +260,7 @@ spec: port: 6641 targetPort: 6641 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -280,7 +280,7 @@ spec: port: 6642 targetPort: 6642 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} ipFamilyPolicy: PreferDualStack {% endif %} selector: @@ -300,7 +300,7 @@ spec: port: 6643 targetPort: 6643 type: ClusterIP -{% if enable_dual_stack_networks %} +{% if enable_dual_stack_networks or enable_ipv6only_stack_networks %} ipFamilyPolicy: PreferDualStack {% endif %} selector: diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index b68a1777b61..c422fe4b37a 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -203,7 +203,7 @@ - nat - mangle - raw - when: flush_iptables | bool and enable_dual_stack_networks + when: flush_iptables | bool and (enable_dual_stack_networks or enable_ipv6only_stack_networks) tags: - ip6tables