From 919621d0208f3037914282bab10de4179bf76016 Mon Sep 17 00:00:00 2001
From: Robert Wimmer <2039811+githubixx@users.noreply.github.com>
Date: Tue, 6 Feb 2024 23:24:00 +0100
Subject: [PATCH] 12.0.0+1.15.0 (#35)

* refactor Molecule setup
* update cilium_chart_version to 1.15.0
* introduce cilium_chart_values_directory variable
* molecule/default/group_vars/all.yml: add comment
* update README and CHANGELOG
* molecule/default/group_vars/all.yml: change demo PW
---
 CHANGELOG.md                                |   6 +
 README.md                                   |  22 +-
 defaults/main.yml                           |  12 +-
 molecule/default/collections.yml            |   7 -
 molecule/default/converge.yml               |  14 +-
 molecule/default/group_vars/all.yml         | 248 +++++++++++-------
 molecule/default/group_vars/k8s.yml         |  10 +
 molecule/default/group_vars/k8s_etcd.yml    |  18 ++
 molecule/default/host_vars/test-assets.yml  |   2 +-
 .../default/host_vars/test-controller1.yml  |  11 +-
 .../default/host_vars/test-controller2.yml  |  11 +-
 .../default/host_vars/test-controller3.yml  |   2 +-
 molecule/default/host_vars/test-etcd1.yml   |   8 +
 molecule/default/host_vars/test-etcd2.yml   |   8 +
 molecule/default/host_vars/test-etcd3.yml   |   8 +
 molecule/default/host_vars/test-worker1.yml |  13 +-
 molecule/default/host_vars/test-worker2.yml |   4 +-
 molecule/default/molecule.yml               |  60 ++++-
 molecule/default/prepare.yml                | 111 ++++----
 molecule/default/requirements.yml           |  16 --
 molecule/default/templates/values.yml.j2    |  49 ++++
 tasks/install.yml                           |   3 +
 tasks/template.yml                          |   3 +
 tasks/upgrade.yml                           |   3 +
 24 files changed, 443 insertions(+), 206 deletions(-)
 delete mode 100644 molecule/default/collections.yml
 create mode 100644 molecule/default/group_vars/k8s.yml
 create mode 100644 molecule/default/group_vars/k8s_etcd.yml
 create mode 100644 molecule/default/host_vars/test-etcd1.yml
 create mode 100644 molecule/default/host_vars/test-etcd2.yml
 create mode 100644 molecule/default/host_vars/test-etcd3.yml
 delete mode 100644 molecule/default/requirements.yml
 create mode 100644 molecule/default/templates/values.yml.j2

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1510ac1..de5f4f6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
 # Changelog

+## 12.0.0+1.15.0
+
+- upgrade to Cilium `v1.15.0`
+- refactor Molecule setup
+- introduce `cilium_chart_values_directory` variable
+
 ## 11.0.6+1.14.5

 - fix Github action

diff --git a/README.md b/README.md
index 9b1983e..0cb9116 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ This Ansible role installs [Cilium](https://docs.cilium.io) network on a Kuberne
 Versions
 --------

-I tag every release and try to stay with [semantic versioning](http://semver.org). If you want to use the role I recommend to checkout the latest tag. The master branch is basically development while the tags mark stable releases. But in general I try to keep master in good shape too. A tag `11.0.0+1.14.4` means this is release `11.0.0` of this role and it contains Cilium chart version `1.14.4`. If the role itself changes `X.Y.Z` before `+` will increase. If the Cilium chart version changes `X.Y.Z` after `+` will increase too. This allows to tag bugfixes and new major versions of the role while it's still developed for a specific Cilium release.
+I tag every release and try to stay with [semantic versioning](http://semver.org). If you want to use the role, I recommend checking out the latest tag. The master branch is basically development while the tags mark stable releases. But in general I try to keep master in good shape too. A tag `12.0.0+1.15.0` means this is release `12.0.0` of this role and it contains Cilium chart version `1.15.0`. If the role itself changes, `X.Y.Z` before the `+` will increase. If the Cilium chart version changes, `X.Y.Z` after the `+` will increase too. This allows tagging bugfixes and new major versions of the role while it is still being developed for a specific Cilium release.

 Requirements
 ------------
@@ -28,7 +28,7 @@ Role Variables
 ```yaml
 # Helm chart version
-cilium_chart_version: "1.14.5"
+cilium_chart_version: "1.15.0"

 # Helm chart name
 cilium_chart_name: "cilium"
@@ -39,6 +39,14 @@ cilium_chart_url: "https://helm.cilium.io/"
 # Kubernetes namespace where Cilium resources should be installed
 cilium_namespace: "cilium"

+# Directory that contains the Helm chart values file. Ansible will try to locate
+# a file called "values.yml.j2" or "values.yaml.j2" in the specified directory
+# (".j2" because you can use the usual Jinja2 templating there).
+# If no such file is found, the default "templates/cilium_values_default.yml.j2"
+# will be used (which can also serve as a starting point). The content of this
+# file will be provided to the "helm install/template" command as the values file.
+cilium_chart_values_directory: "/tmp/cilium/helm"
+
 # etcd settings. If "cilium_etcd_enabled" variable is defined and set to "true",
 # Cilium etcd settings are generated and deployed. Otherwise all the following
 # "cilium_etcd_*" settings are ignored.
@@ -116,11 +124,9 @@ Usage

 The first thing to do is to check `templates/cilium_values_default.yml.j2`. This file contains the values/settings for the Cilium Helm chart that are different from the default ones which are located [here](https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/values.yaml). The default values of this Ansible role assume a TLS-enabled `etcd` cluster. If you have a self-hosted/bare-metal Kubernetes cluster, chances are high that an `etcd` cluster is already running for the Kubernetes API server, which is the case for me. I'm using my Ansible [etcd role](https://github.com/githubixx/ansible-role-etcd) to install such an `etcd` cluster and my [Kubernetes Certificate Authority role](https://github.com/githubixx/ansible-role-kubernetes-ca) to generate the certificates. So if you used my roles, you can use this Cilium role basically as is.

-The `templates/cilium_values_default.yml.j2` template also contains some `if` clauses to use an `etcd` cluster that is not TLS enabled. See `defaults/main.yml` to check which values can be changed. You can also introduce your own variables and use it in `templates/cilium_values_user.yml.j2` if you want of course.
-
-But nothing is made in stone ;-) To use your own values just create a file called `cilium_values_user.yml.j2` and put it into the `templates` directory. Then this Cilium role will use that file to render the Helm values. You can use `templates/cilium_values_default.yml.j2` as a template or just start from scratch. As mentioned above you can modify all settings for the Cilium Helm chart that are different to the default ones which are located [here](https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/values.yaml).
+The `templates/cilium_values_default.yml.j2` template also contains some `if` clauses to use an `etcd` cluster that is not TLS enabled. See `defaults/main.yml` to check which values can be changed. You can also introduce your own variables. To use your own values, just create a file called `values.yml.j2` or `values.yaml.j2` and put it into the directory specified in `cilium_chart_values_directory`. This role will then use that file to render the Helm values.
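To illustrate, a minimal custom values file might look like the following sketch. The keys shown are borrowed from the Molecule test template added in this PR and are illustrative, not required:

```yaml
# /tmp/cilium/helm/values.yml.j2 -- picked up because it lives in the
# directory set by "cilium_chart_values_directory" (default "/tmp/cilium/helm").

# Keep "kube-proxy" functionality.
kubeProxyReplacement: "false"

# Use Cilium's cluster-pool IPAM; every node gets a /24 out of this range.
ipam:
  mode: "cluster-pool"
  operator:
    clusterPoolIPv4PodCIDRList: ["10.100.0.0/16"]
    clusterPoolIPv4MaskSize: 24
```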
-After the values file (`templates/cilium_values_default.yml.j2` or `templates/cilium_values_user.yml.j2`) is in place and the `defaults/main.yml` values are checked the role can be installed. Most of the role's tasks are executed locally by default so to say as quite a few tasks need to communicate with the Kubernetes API server or executing [Helm](https://helm.sh/) commands. But you can delegate this kind of tasks to a different host by using `cilium_delegate_to` variable (see above). Just make sure that the host you delegate these kind of tasks has connection to the Kubernetes API server and the user a valid `KUBECONFIG` file.
+After the values file is in place and the `defaults/main.yml` values are checked, the role can be installed. Most of the role's tasks are executed locally by default, as quite a few tasks need to communicate with the Kubernetes API server or execute [Helm](https://helm.sh/) commands. But you can delegate these tasks to a different host by using the `cilium_delegate_to` variable (see above). Just make sure that the host you delegate these tasks to has a connection to the Kubernetes API server and that the user has a valid `KUBECONFIG` file.

 The default action is to just render the Kubernetes resources YAML file after replacing all Jinja2 variables. In the `Example Playbook` section below there is an `Example 2 (assign tag to role)`. The role `githubixx.cilium_kubernetes` has a tag `role-cilium-kubernetes` assigned. Assuming that the values for the Helm chart should be rendered (nothing will be installed in this case) and the playbook is called `k8s.yml`, execute the following command:

@@ -146,9 +152,9 @@ ansible-playbook --tags=role-cilium-kubernetes --extra-vars cilium_action=instal

 To check if everything was deployed use the usual `kubectl` commands like `kubectl -n cilium get pods -o wide`.

-As [Cilium](https://docs.cilium.io) issues updates/upgrades every few weeks/months the role also can do upgrades. The role basically executes what is described in [Cilium upgrade guide](https://docs.cilium.io/en/v1.14/operations/upgrade/). That means the Cilium pre-flight check will be installed and some checks are executed before the update actually takes place. Have a look at `tasks/upgrade.yml` to see what's happening before, during and after the update. Of course you should consult [Cilium upgrade guide](https://docs.cilium.io/en/v1.14/operations/upgrade/) in general to check for major changes and stuff like that before upgrading. Also make sure to check the [Upgrade Notes](https://docs.cilium.io/en/stable/operations/upgrade/#current-release-required-changes)!
+As [Cilium](https://docs.cilium.io) issues updates/upgrades every few weeks/months, the role can also do upgrades. The role basically executes what is described in the [Cilium upgrade guide](https://docs.cilium.io/en/v1.15/operations/upgrade/). That means the Cilium pre-flight check will be installed and some checks are executed before the update actually takes place. Have a look at `tasks/upgrade.yml` to see what's happening before, during and after the update. Of course, you should consult the [Cilium upgrade guide](https://docs.cilium.io/en/v1.15/operations/upgrade/) in general to check for major changes before upgrading. Also make sure to check the [Upgrade Notes](https://docs.cilium.io/en/stable/operations/upgrade/#current-release-required-changes)!
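Assuming the same playbook layout and tag as in the install example above, an upgrade run would then look like this (a sketch: `cilium_action=upgrade` is assumed to follow the same `cilium_action` pattern as `install` and to be handled by `tasks/upgrade.yml`):

```bash
ansible-playbook --tags=role-cilium-kubernetes --extra-vars cilium_action=upgrade k8s.yml
```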
-If a upgrade wasn't successful a [Roll back](https://docs.cilium.io/en/v1.14/operations/upgrade/#step-3-rolling-back) to a previous version can be basically initiated by just changing `cilium_chart_version` variable. But you should definitely read the Cilium [roll back guide](https://docs.cilium.io/en/v1.14/operations/upgrade/#step-3-rolling-back). Switching between minor releases is normally not an issue but switching from one major release to a previous one might be not so easy.
+If an upgrade wasn't successful, a [roll back](https://docs.cilium.io/en/v1.15/operations/upgrade/#step-3-rolling-back) to a previous version can basically be initiated by just changing the `cilium_chart_version` variable. But you should definitely read the Cilium [roll back guide](https://docs.cilium.io/en/v1.15/operations/upgrade/#step-3-rolling-back). Switching between minor releases is normally not an issue, but switching from one major release to a previous one might not be so easy.

 Also check `templates/cilium_values_default_pre_flight_check.yml.j2`. If you need to adjust values for the `pre-flight` check, you can either change that file or create a file `templates/cilium_values_user_pre_flight_check.yml.j2` with your own values.

diff --git a/defaults/main.yml b/defaults/main.yml
index 90749f8..e1d4942 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
-# Helm chart version (uses Cilium v1.14.5)
-cilium_chart_version: "1.14.5"
+# Helm chart version (uses Cilium v1.15.0)
+cilium_chart_version: "1.15.0"

 # Helm release name
 cilium_release_name: "cilium"
@@ -17,6 +17,14 @@ cilium_chart_url: "https://helm.cilium.io/"
 # Kubernetes namespace where Cilium resources should be installed
 cilium_namespace: "cilium"

+# Directory that contains the Helm chart values file. Ansible will try to locate
+# a file called "values.yml.j2" or "values.yaml.j2" in the specified directory
+# (".j2" because you can use the usual Jinja2 templating there).
+# If no such file is found, the default "templates/cilium_values_default.yml.j2"
+# will be used (which can also serve as a starting point). The content of this
+# file will be provided to the "helm install/template" command as the values file.
+cilium_chart_values_directory: "/tmp/cilium/helm"
+
 # etcd settings. If "cilium_etcd_enabled" variable is defined and set to "true",
 # Cilium etcd settings are generated and deployed. Otherwise all the following
 # "cilium_etcd_*" settings are ignored.
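For reference, enabling the etcd backend described by the `cilium_etcd_*` block above could look like the following inventory sketch; the variable names come from this role and its Molecule setup, while the interface name and the secret name are illustrative assumptions:

```yaml
# group_vars/all.yml (sketch): point Cilium at an existing TLS-enabled etcd cluster.
cilium_etcd_enabled: "true"                      # render and deploy the Cilium etcd settings
cilium_etcd_nodes_group: "k8s_etcd"              # inventory group containing the etcd hosts
cilium_etcd_interface: "eth0"                    # interface the etcd daemons listen on
cilium_etcd_client_port: 2379
cilium_etcd_secrets_name: "cilium-etcd-secrets"  # omit to fall back to plain HTTP
```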
diff --git a/molecule/default/collections.yml b/molecule/default/collections.yml
deleted file mode 100644
index 23e386c..0000000
--- a/molecule/default/collections.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# Copyright (C) 2023 Robert Wimmer
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-collections:
-  - ansible.posix
-  - kubernetes.core

diff --git a/molecule/default/converge.yml b/molecule/default/converge.yml
index 6992d92..7a0cbfe 100644
--- a/molecule/default/converge.yml
+++ b/molecule/default/converge.yml
@@ -2,10 +2,19 @@
 # Copyright (C) 2023 Robert Wimmer
 # SPDX-License-Identifier: GPL-3.0-or-later

+- name: Gather facts
+  hosts: all
+  become: true
+  gather_facts: true
+  tasks:
+    - name: Populate Ansible hostVars
+      ansible.builtin.setup:
+
 - name: Setup Cilium
   hosts: k8s_worker
   become: true
-  gather_facts: true
+  environment:
+    K8S_AUTH_KUBECONFIG: "{{ k8s_admin_conf_dir }}/admin.kubeconfig"
   tasks:
     - name: Include Cilium role
       ansible.builtin.include_role:
@@ -16,7 +25,8 @@
 - name: Setup tooling to make worker nodes usable
   hosts: test-assets
   become: true
-  gather_facts: true
+  environment:
+    K8S_AUTH_KUBECONFIG: "{{ k8s_admin_conf_dir }}/admin.kubeconfig"
   tasks:
     - name: Setup tooling
       when:

diff --git a/molecule/default/group_vars/all.yml b/molecule/default/group_vars/all.yml
index c9035be..dda0577 100644
--- a/molecule/default/group_vars/all.yml
+++ b/molecule/default/group_vars/all.yml
@@ -2,146 +2,156 @@
 # Copyright (C) 2023 Robert Wimmer
 # SPDX-License-Identifier: GPL-3.0-or-later

+# Use "systemd-timesyncd" for time services. It's available by default.
 harden_linux_ntp: "systemd-timesyncd"

-# Password for user "root" and "cilium" is "cilium"
-harden_linux_root_password: "$6$rounds=656000$mysecretsalt$IPK2dn/YVQ4vSNE7y/KlZOUY1kBo3V0pMDKSwVwrJk2R6s5IeAnoucTR2YcOvttZCjLkg3PuIp8Flx16kLNQy."
-harden_linux_deploy_user: "cilium"
-harden_linux_deploy_user_password: "$6$rounds=656000$mysecretsalt$IPK2dn/YVQ4vSNE7y/KlZOUY1kBo3V0pMDKSwVwrJk2R6s5IeAnoucTR2YcOvttZCjLkg3PuIp8Flx16kLNQy."
-harden_linux_deploy_user_home: "/home/cilium"
+# The password for the users "root" and "vagrant" is "vagrant" in both cases.
+# As the "vagrant" user is available in every Vagrant Ubuntu box anyway, just
+# use it.
+harden_linux_root_password: "$6$ec6PmcEygP6do8Ls$847Pqqo1fXJFeMvPkmP3ipLQ9vhny1PYtwnnIptpZ1Sc8KXUuPGu29aUTOdNdgIfxR3Bix5SUkNfSMMCetej41"
+harden_linux_deploy_user: "vagrant"
+harden_linux_deploy_user_password: "$6$ec6PmcEygP6do8Ls$847Pqqo1fXJFeMvPkmP3ipLQ9vhny1PYtwnnIptpZ1Sc8KXUuPGu29aUTOdNdgIfxR3Bix5SUkNfSMMCetej41"
+harden_linux_deploy_user_home: "/home/vagrant"
+harden_linux_deploy_user_uid: "1000"
+harden_linux_deploy_user_shell: "/bin/bash"

+# Enable IP forwarding for IPv4 and IPv6
 harden_linux_sysctl_settings_user:
   "net.ipv4.ip_forward": 1
   "net.ipv6.conf.default.forwarding": 1
   "net.ipv6.conf.all.forwarding": 1

+# Let SSHd listen on port 22, allow password authentication and allow "root"
+# login. The last two settings are not recommended for production use, but for
+# this test deployment it's okay, as it makes debugging easier and faster.
 harden_linux_sshd_settings_user:
   "^Port ": "Port 22"
   "^PasswordAuthentication": "PasswordAuthentication yes"
   "^PermitRootLogin": "PermitRootLogin yes"

-harden_linux_ufw_rules:
-  - rule: "allow"
-    to_port: "22"
-    protocol: "tcp"
-  - rule: "allow"
-    to_port: "51820"
-    protocol: "udp"
-  - rule: "allow"
-    to_port: "80"
-    protocol: "tcp"
-  - rule: "allow"
-    to_port: "443"
-    protocol: "tcp"
-  - rule: "allow"
-    to_port: "25"
-    protocol: "tcp"
-
-harden_linux_ufw_allow_networks:
-  - "10.0.0.0/8"
-  - "172.16.0.0/12"
-  - "192.168.0.0/16"
-
+# Enable logging for UFW.
 harden_linux_ufw_logging: 'on'

+# Set the default forward policy to "ACCEPT".
 harden_linux_ufw_defaults_user:
   "^DEFAULT_FORWARD_POLICY": 'DEFAULT_FORWARD_POLICY="ACCEPT"'

+# Don't block SSH logins from the following networks even if login attempts
+# fail a few times.
 harden_linux_sshguard_whitelist:
   - "127.0.0.0/8"
   - "::1/128"
   - "10.0.0.0/8"
+  - "172.16.0.0/12"
+  - "192.168.0.0/16"

-etcd_ca_conf_directory: "/tmp/k8s"
+# DNS
+harden_linux_systemd_resolved_settings:
+  - DNS=
+  - DNS=8.8.8.8 1.1.1.1 2606:4700:4700::1111 2620:fe::fe
+  - FallbackDNS=
+  - FallbackDNS=149.112.112.112 1.0.0.1 2620:fe::9 2606:4700:4700::1001
+  - DNSOverTLS=
+  - DNSOverTLS=opportunistic
+
+# Directory where the etcd certificates are stored on the Ansible controller
+# host. Certificate files for etcd will be copied from this directory to
+# the etcd nodes.
+etcd_ca_conf_directory: "{{ k8s_ca_conf_directory }}"
+# Directory where the etcd certificates are stored on the etcd hosts.
 etcd_conf_dir: "/etc/etcd"
+# Interface the etcd service is listening on.
 etcd_interface: "{{ k8s_interface }}"
+# A few additional settings for etcd.
 etcd_settings_user:
   "heartbeat-interval": "250"
   "election-timeout": "2500"

+# Host names and IP addresses in the etcd certificates.
 etcd_cert_hosts:
   - localhost
   - 127.0.0.1
-  - 10.32.0.1
-  - kubernetes
-  - kubernetes.default
-  - kubernetes.default.svc
-  - kubernetes.default.svc.cluster
-  - kubernetes.svc.cluster.local
-  - 192.168.10.5
-  - 192.168.10.10
-  - 192.168.10.20
-  - 192.168.10.30
-  - 192.168.10.100
-  - 192.168.10.110
-  - 192.168.10.120
-  - 192.168.10.130
-  - 10.10.10.5
-  - 10.10.10.10
-  - 10.10.10.20
-  - 10.10.10.30
-  - 10.10.10.100
-  - 10.10.10.110
-  - 10.10.10.120
-  - 10.10.10.130
-  - test-controller1
-  - test-controller2
-  - test-controller3
-  - test-worker1
-  - test-worker2
-  - test-worker3
-
+# This list should contain all etcd clients that want to connect to the etcd
+# cluster. The most important client is "kube-apiserver" of course. Also
+# "cilium" should connect. So we add it here too to generate the needed
+# certificates.
 etcd_additional_clients:
-  - k8s-apiserver-etcd
   - cilium
+  - k8s-apiserver-etcd

-k8s_ca_conf_directory: "/tmp/k8s"
-k8s_ca_conf_directory_perm: "0775"
-k8s_ca_file_perm: "0666"
-k8s_ca_certificate_owner: "{{ k8s_config_owner }}"
-k8s_ca_certificate_group: "{{ k8s_config_group }}"
-
-k8s_release: "1.27.5"
+# Directory where the Kubernetes certificates are stored on the Ansible
+# controller host.
+k8s_ca_conf_directory: "/tmp/k8s-ca"
+# Permissions for the Kubernetes CA directory.
+k8s_ca_conf_directory_perm: "0700"
+# Permissions for the Kubernetes CA files.
+k8s_ca_file_perm: "0600"
+# Owner of the Kubernetes CA files.
+k8s_ca_certificate_owner: "vagrant"
+# Group of the Kubernetes CA files.
+k8s_ca_certificate_group: "vagrant"
+
+# Interface the Kubernetes control plane services are listening on.
 k8s_interface: "wg0"

-k8s_controller_delegate_to: "test-assets"
-
-k8s_config_cluster_name: "cilium"
-k8s_config_directory: "/tmp/k8s"
-k8s_config_directory_perm: "0777"
-k8s_config_file_perm: "0666"
-k8s_config_owner: "root"
-k8s_config_group: "root"
-
-k8s_encryption_config_directory: "{{k8s_config_directory}}"
+# Interface the etcd daemons are listening on.
+k8s_ctl_etcd_interface: "{{ etcd_interface }}"
+
+# Delegate tasks like creating the Kubernetes CA certificates to the following
+# host. This host also communicates with the "kube-apiserver" if required
+# for certain tasks.
+k8s_ctl_delegate_to: "test-assets"
+
+# Directory where the Kubernetes certificates are stored on the Ansible
+# controller host and where Ansible can find them to be copied to the
+# Kubernetes control plane nodes.
+k8s_ctl_ca_conf_directory: "{{ k8s_ca_conf_directory }}"
+
+# The name of the Kubernetes cluster.
+k8s_config_cluster_name: "k8s"
+
+# Directory where "admin.kubeconfig" (the credentials file) for the "admin"
+# user is stored.
+k8s_admin_conf_dir: "{{ '~/k8s/configs' | expanduser }}"
+# Permissions for the directory specified in "k8s_admin_conf_dir".
+k8s_admin_conf_dir_perm: "0700"
+# Owner of the directory specified in "k8s_admin_conf_dir" and for
+# "admin.kubeconfig" stored in this directory.
+k8s_admin_conf_owner: "root"
+# Group of the directory specified in "k8s_admin_conf_dir" and for
+# "admin.kubeconfig" stored in this directory.
+k8s_admin_conf_group: "root"
+
+# Run Kubernetes control plane services as the following user.
+k8s_run_as_user: "k8s"
+# Run Kubernetes control plane services as the following group.
+k8s_run_as_group: "k8s"
+
+# Key used for encrypting secrets (encryption at-rest) by the
+# "kube-apiserver".
 k8s_encryption_config_key: "Y29uZmlndXJhdGlvbjIyCg=="
-k8s_encryption_config_owner: "{{ k8s_config_owner }}"
-k8s_encryption_config_group: "{{ k8s_config_owner }}"
-k8s_encryption_config_directory_perm: "{{ k8s_config_directory_perm }}"
-k8s_encryption_config_file_perm: "{{ k8s_config_file_perm }}"
-
-k8s_conf_dir: "/var/lib/kubernetes"
-
-k8s_worker_kubelet_conf_dir: "/var/lib/kubelet"
-
-k8s_apiserver_secure_port: "6443"
+# Additional settings for the "kube-apiserver".
 k8s_apiserver_settings_user:
   "enable-aggregator-routing": "true"

 k8s_worker_kubelet_settings:
-  "config": "{{k8s_worker_kubelet_conf_dir}}/kubelet-config.yaml"
-  "node-ip": "{{hostvars[inventory_hostname]['ansible_' + k8s_interface].ipv4.address}}"
-  "kubeconfig": "{{k8s_worker_kubelet_conf_dir}}/kubeconfig"
+  "config": "{{ k8s_worker_kubelet_conf_dir }}/kubelet-config.yaml"
+  "node-ip": "{{ hostvars[inventory_hostname]['ansible_' + k8s_interface].ipv4.address }}"
+  "kubeconfig": "{{ k8s_worker_kubelet_conf_dir }}/kubeconfig"
   "seccomp-default": ""

-containerd_flavor: "k8s"
+# Directory for the "runc" binaries
+runc_bin_directory: "/usr/local/sbin"
+
+# Directory to store the "containerd" archive after download
 containerd_tmp_directory: "/tmp"
-containerd_runc_binary_directory: "/usr/local/sbin"
-containerd_crictl_config_file: "crictl.yaml"
-containerd_crictl_config_directory: "/etc"
-containerd_cni_binary_directory: "/opt/cni/bin"

+# Use "etcd" for Cilium
 cilium_etcd_enabled: "true"
+# Delegate Cilium tasks that need to communicate with the Kubernetes API
+# server to the following host.
 cilium_delegate_to: "test-assets"
+# Template directory for a custom "values.yml.j2"
+cilium_chart_values_directory: "templates"
+# Show debug output for Cilium Helm commands.
cilium_helm_show_commands: true cilium_etcd_interface: "{{ k8s_interface }}" cilium_etcd_client_port: 2379 @@ -153,13 +163,53 @@ cilium_etcd_cafile: "ca-etcd.pem" cilium_etcd_certfile: "cert-cilium.pem" cilium_etcd_keyfile: "cert-cilium-key.pem" +# Delegate tasks to create CoreDNS K8s resources to this host. coredns_delegate_to: "test-assets" +# Common name for "etcd" certificate authority certificates. ca_etcd_csr_cn: "etcd" +ca_etcd_csr_key_algo: "ecdsa" +ca_etcd_csr_key_size: "384" + +# Common name for "kube-apiserver" certificate authority certificate. ca_k8s_apiserver_csr_cn: "kubernetes" -etcd_server_csr_cn: "etcd" -etcd_peer_csr_cn: "etcd" -etcd_client_csr_cn_prefix: "etcd" -k8s_apiserver_csr_cn: "kubernetes" -k8s_admin_csr_cn: "admin" -k8s_controller_manager_sa_csr_cn: "service-accounts" +ca_k8s_apiserver_csr_key_algo: "ecdsa" +ca_k8s_apiserver_csr_key_size: "384" + +# Common names for "etcd" server, peer and client certificates. +etcd_server_csr_cn: "etcd-server" +etcd_server_csr_key_algo: "ecdsa" +etcd_server_csr_key_size: "384" + +etcd_peer_csr_cn: "etcd-peer" +etcd_peer_csr_key_algo: "ecdsa" +etcd_peer_csr_key_size: "384" + +etcd_client_csr_cn_prefix: "etcd-client" +etcd_client_csr_key_algo: "ecdsa" +etcd_client_csr_key_size: "384" + +# Common names for kube-apiserver, admin and kube-controller-manager certificates. +k8s_apiserver_csr_cn: "k8s-apiserver" +k8s_apiserver_csr_key_algo: "ecdsa" +k8s_apiserver_csr_key_size: "384" + +k8s_admin_csr_cn: "k8s-admin" +k8s_admin_csr_key_algo: "ecdsa" +k8s_admin_csr_key_size: "384" + +k8s_worker_csr_key_algo: "ecdsa" +k8s_worker_csr_key_size: "384" + +k8s_controller_manager_csr_key_algo: "ecdsa" +k8s_controller_manager_csr_key_size: "384" + +k8s_scheduler_csr_key_algo: "ecdsa" +k8s_scheduler_csr_key_size: "384" + +k8s_controller_manager_sa_csr_cn: "k8s-service-accounts" +k8s_controller_manager_sa_csr_key_algo: "ecdsa" +k8s_controller_manager_sa_csr_key_size: "384" + +k8s_kube_proxy_csr_key_algo: "ecdsa" +k8s_kube_proxy_csr_key_size: "384" diff --git a/molecule/default/group_vars/k8s.yml b/molecule/default/group_vars/k8s.yml new file mode 100644 index 0000000..802a004 --- /dev/null +++ b/molecule/default/group_vars/k8s.yml @@ -0,0 +1,10 @@ +--- +# Copyright (C) 2023 Robert Wimmer +# SPDX-License-Identifier: GPL-3.0-or-later + +# Allow all traffic from the following networks. 
+harden_linux_ufw_allow_networks: + - "10.32.0.0/16" # Server Cluster IP range + - "10.200.0.0/16" # Pod IP range + - "10.10.10.0/24" # Wireguard IP range + - "172.16.10.0/24" # VM IP range diff --git a/molecule/default/group_vars/k8s_etcd.yml b/molecule/default/group_vars/k8s_etcd.yml new file mode 100644 index 0000000..a2a280d --- /dev/null +++ b/molecule/default/group_vars/k8s_etcd.yml @@ -0,0 +1,18 @@ +--- +# Copyright (C) 2023 Robert Wimmer +# SPDX-License-Identifier: GPL-3.0-or-later + +# Open a few ports for ssh, Wireguard and etcd +harden_linux_ufw_rules: + - rule: "allow" + to_port: "22" + protocol: "tcp" + - rule: "allow" + to_port: "51820" + protocol: "udp" + - rule: "allow" + to_port: "2379" + protocol: "tcp" + - rule: "allow" + to_port: "2380" + protocol: "tcp" diff --git a/molecule/default/host_vars/test-assets.yml b/molecule/default/host_vars/test-assets.yml index bd31c27..65df01f 100644 --- a/molecule/default/host_vars/test-assets.yml +++ b/molecule/default/host_vars/test-assets.yml @@ -5,4 +5,4 @@ wireguard_address: "10.10.10.5/24" wireguard_port: 51820 wireguard_persistent_keepalive: "30" -wireguard_endpoint: "192.168.10.5" +wireguard_endpoint: "172.16.10.5" diff --git a/molecule/default/host_vars/test-controller1.yml b/molecule/default/host_vars/test-controller1.yml index fa24cee..96fe1ae 100644 --- a/molecule/default/host_vars/test-controller1.yml +++ b/molecule/default/host_vars/test-controller1.yml @@ -5,4 +5,13 @@ wireguard_address: "10.10.10.10/24" wireguard_port: 51820 wireguard_persistent_keepalive: "30" -wireguard_endpoint: "192.168.10.10" +wireguard_endpoint: "172.16.10.10" + +ha_proxy_frontend_bind_address: "127.0.0.1" +ha_proxy_frontend_port: "16443" + +k8s_ctl_api_endpoint_host: "127.0.0.1" +k8s_ctl_api_endpoint_port: "16443" + +k8s_worker_api_endpoint_host: "{{ k8s_ctl_api_endpoint_host }}" +k8s_worker_api_endpoint_port: "{{ k8s_ctl_api_endpoint_port }}" diff --git a/molecule/default/host_vars/test-controller2.yml b/molecule/default/host_vars/test-controller2.yml index aae5f1d..fba1072 100644 --- a/molecule/default/host_vars/test-controller2.yml +++ b/molecule/default/host_vars/test-controller2.yml @@ -5,4 +5,13 @@ wireguard_address: "10.10.10.20/24" wireguard_port: 51820 wireguard_persistent_keepalive: "30" -wireguard_endpoint: "192.168.10.20" +wireguard_endpoint: "172.16.10.20" + +ha_proxy_frontend_bind_address: "127.0.0.1" +ha_proxy_frontend_port: "16443" + +k8s_ctl_api_endpoint_host: "127.0.0.1" +k8s_ctl_api_endpoint_port: "16443" + +k8s_worker_api_endpoint_host: "{{ k8s_ctl_api_endpoint_host }}" +k8s_worker_api_endpoint_port: "{{ k8s_ctl_api_endpoint_port }}" diff --git a/molecule/default/host_vars/test-controller3.yml b/molecule/default/host_vars/test-controller3.yml index ac8b59c..ab6d02e 100644 --- a/molecule/default/host_vars/test-controller3.yml +++ b/molecule/default/host_vars/test-controller3.yml @@ -5,4 +5,4 @@ wireguard_address: "10.10.10.30/24" wireguard_port: 51820 wireguard_persistent_keepalive: "30" -wireguard_endpoint: "192.168.10.30" +wireguard_endpoint: "172.16.10.30" diff --git a/molecule/default/host_vars/test-etcd1.yml b/molecule/default/host_vars/test-etcd1.yml new file mode 100644 index 0000000..adec115 --- /dev/null +++ b/molecule/default/host_vars/test-etcd1.yml @@ -0,0 +1,8 @@ +--- +# Copyright (C) 2023 Robert Wimmer +# SPDX-License-Identifier: GPL-3.0-or-later + +wireguard_address: "10.10.10.100/24" +wireguard_port: 51820 +wireguard_persistent_keepalive: "30" +wireguard_endpoint: "172.16.10.100" diff --git 
a/molecule/default/host_vars/test-etcd2.yml b/molecule/default/host_vars/test-etcd2.yml new file mode 100644 index 0000000..d6f5809 --- /dev/null +++ b/molecule/default/host_vars/test-etcd2.yml @@ -0,0 +1,8 @@ +--- +# Copyright (C) 2023 Robert Wimmer +# SPDX-License-Identifier: GPL-3.0-or-later + +wireguard_address: "10.10.10.110/24" +wireguard_port: 51820 +wireguard_persistent_keepalive: "30" +wireguard_endpoint: "172.16.10.110" diff --git a/molecule/default/host_vars/test-etcd3.yml b/molecule/default/host_vars/test-etcd3.yml new file mode 100644 index 0000000..f168075 --- /dev/null +++ b/molecule/default/host_vars/test-etcd3.yml @@ -0,0 +1,8 @@ +--- +# Copyright (C) 2023 Robert Wimmer +# SPDX-License-Identifier: GPL-3.0-or-later + +wireguard_address: "10.10.10.120/24" +wireguard_port: 51820 +wireguard_persistent_keepalive: "30" +wireguard_endpoint: "172.16.10.120" diff --git a/molecule/default/host_vars/test-worker1.yml b/molecule/default/host_vars/test-worker1.yml index 3d0d9a5..a096701 100644 --- a/molecule/default/host_vars/test-worker1.yml +++ b/molecule/default/host_vars/test-worker1.yml @@ -2,7 +2,16 @@ # Copyright (C) 2023 Robert Wimmer # SPDX-License-Identifier: GPL-3.0-or-later -wireguard_address: "10.10.10.100/24" +wireguard_address: "10.10.10.200/24" wireguard_port: 51820 wireguard_persistent_keepalive: "30" -wireguard_endpoint: "192.168.10.100" +wireguard_endpoint: "172.16.10.200" + +ha_proxy_frontend_bind_address: "127.0.0.1" +ha_proxy_frontend_port: "16443" + +k8s_ctl_api_endpoint_host: "127.0.0.1" +k8s_ctl_api_endpoint_port: "16443" + +k8s_worker_api_endpoint_host: "{{ k8s_ctl_api_endpoint_host }}" +k8s_worker_api_endpoint_port: "{{ k8s_ctl_api_endpoint_port }}" diff --git a/molecule/default/host_vars/test-worker2.yml b/molecule/default/host_vars/test-worker2.yml index 03b4095..cf6ba00 100644 --- a/molecule/default/host_vars/test-worker2.yml +++ b/molecule/default/host_vars/test-worker2.yml @@ -2,7 +2,7 @@ # Copyright (C) 2023 Robert Wimmer # SPDX-License-Identifier: GPL-3.0-or-later -wireguard_address: "10.10.10.110/24" +wireguard_address: "10.10.10.210/24" wireguard_port: 51820 wireguard_persistent_keepalive: "30" -wireguard_endpoint: "192.168.10.110" +wireguard_endpoint: "172.16.10.210" diff --git a/molecule/default/molecule.yml b/molecule/default/molecule.yml index 4ba5e37..c15fec3 100644 --- a/molecule/default/molecule.yml +++ b/molecule/default/molecule.yml @@ -13,7 +13,7 @@ driver: platforms: - name: test-assets - box: generic/ubuntu2004 + box: generic/ubuntu2204 memory: 2048 cpus: 2 groups: @@ -24,67 +24,103 @@ platforms: - auto_config: true network_name: private_network type: static - ip: 192.168.10.5 + ip: 172.16.10.5 - name: test-controller1 box: generic/ubuntu2204 memory: 2048 cpus: 2 groups: - vpn + - haproxy - k8s_controller - k8s_worker - - k8s_etcd - k8s interfaces: - auto_config: true network_name: private_network type: static - ip: 192.168.10.10 + ip: 172.16.10.10 - name: test-controller2 box: generic/ubuntu2204 memory: 2048 cpus: 2 groups: - vpn + - haproxy - k8s_controller - k8s_worker - - k8s_etcd - k8s interfaces: - auto_config: true network_name: private_network type: static - ip: 192.168.10.20 + ip: 172.16.10.20 - name: test-controller3 - box: generic/ubuntu2004 + box: generic/ubuntu2204 memory: 2048 cpus: 2 groups: - vpn - k8s_controller - k8s_worker - - k8s_etcd - k8s interfaces: - auto_config: true network_name: private_network type: static - ip: 192.168.10.30 + ip: 172.16.10.30 + - name: test-etcd1 + box: generic/ubuntu2204 + memory: 2048 + 
cpus: 2 + groups: + - vpn + - k8s_etcd + interfaces: + - auto_config: true + network_name: private_network + type: static + ip: 172.16.10.100 + - name: test-etcd2 + box: generic/ubuntu2204 + memory: 2048 + cpus: 2 + groups: + - vpn + - k8s_etcd + interfaces: + - auto_config: true + network_name: private_network + type: static + ip: 172.16.10.110 + - name: test-etcd3 + box: generic/ubuntu2204 + memory: 2048 + cpus: 2 + groups: + - vpn + - k8s_etcd + interfaces: + - auto_config: true + network_name: private_network + type: static + ip: 172.16.10.120 - name: test-worker1 box: generic/ubuntu2204 memory: 2048 cpus: 2 groups: - vpn + - haproxy - k8s_worker - k8s interfaces: - auto_config: true network_name: private_network type: static - ip: 192.168.10.100 + ip: 172.16.10.200 - name: test-worker2 - box: generic/ubuntu2004 + box: generic/ubuntu2204 memory: 2048 cpus: 2 groups: @@ -95,7 +131,7 @@ platforms: - auto_config: true network_name: private_network type: static - ip: 192.168.10.110 + ip: 172.16.10.210 provisioner: name: ansible diff --git a/molecule/default/prepare.yml b/molecule/default/prepare.yml index 4373b28..07634d5 100644 --- a/molecule/default/prepare.yml +++ b/molecule/default/prepare.yml @@ -9,7 +9,7 @@ gather_facts: true tasks: - name: Update APT package cache - apt: + ansible.builtin.apt: update_cache: true cache_valid_time: 3600 @@ -20,7 +20,7 @@ gather_facts: true tasks: - name: Setup harden_linux role - include_role: + ansible.builtin.include_role: name: githubixx.harden_linux - name: Setup Wireguard VPN @@ -30,26 +30,30 @@ gather_facts: true tasks: - name: Setup wireguard role - include_role: + ansible.builtin.include_role: name: githubixx.ansible_role_wireguard -- name: Setup Kubernetes certificates +- name: Setup cfssl hosts: k8s_assets become: true gather_facts: false tasks: - name: Install cfssl - include_role: + ansible.builtin.include_role: name: githubixx.cfssl +- name: Setup Kubernetes certificates + hosts: k8s_assets + gather_facts: false + tasks: - name: Generate etcd and K8s TLS certificates - include_role: + ansible.builtin.include_role: name: githubixx.kubernetes_ca - name: Copy certificate files from assets to local host ansible.posix.synchronize: mode: pull - src: "{{ k8s_config_directory }}" + src: "{{ k8s_ca_conf_directory }}" dest: "/tmp" - name: Setup etcd @@ -59,10 +63,10 @@ gather_facts: true tasks: - name: Include etcd role - include_role: + ansible.builtin.include_role: name: githubixx.etcd -- name: Setup Kubernetes authentication files +- name: Setup Kubernetes client tooling hosts: k8s_assets become: true gather_facts: true @@ -71,45 +75,6 @@ ansible.builtin.include_role: name: githubixx.kubectl - - name: Generate kubelet kubeconfig - ansible.builtin.include_tasks: - file: "tasks/kubelets.yml" - - - name: Generate kube-proxy kubeconfig - ansible.builtin.include_tasks: - file: "tasks/kube-proxy.yml" - - - name: Generate kube-controller-manager kubeconfig - ansible.builtin.include_tasks: - file: "tasks/kube-controller-manager.yml" - - - name: Generate kube-scheduler kubeconfig - ansible.builtin.include_tasks: - file: "tasks/kube-scheduler.yml" - - - name: Generate kube-admin-user kubeconfig - ansible.builtin.include_tasks: - file: "tasks/kube-admin-user.yml" - - - name: Generate kubeencryption configuration - ansible.builtin.include_tasks: - file: "tasks/kubeencryptionconfig.yml" - - - name: Copy configuration files from assets to local host - ansible.posix.synchronize: - mode: pull - src: "{{ k8s_config_directory }}" - dest: "/tmp" - -- name: Setup 
Kubernetes tooling - hosts: k8s_assets - become: true - gather_facts: true - tasks: - - name: Generate configuration for kubectl - ansible.builtin.include_tasks: - file: "tasks/kubectlconfig.yml" - - name: Install support packages ansible.builtin.package: name: "{{ packages }}" @@ -128,24 +93,66 @@ ansible.builtin.include_role: name: gantsign.helm -- name: Setup Kubernetes controller - hosts: k8s_controller +- name: Setup HAProxy + hosts: haproxy + remote_user: vagrant become: true gather_facts: true tasks: - - name: Include kubernetes_controller role + - name: Setup haproxy role ansible.builtin.include_role: - name: githubixx.kubernetes_controller + name: githubixx.haproxy - name: Setup containerd hosts: k8s_worker become: true gather_facts: true tasks: + - name: Include runc role + ansible.builtin.include_role: + name: githubixx.runc + + - name: Include CNI role + ansible.builtin.include_role: + name: githubixx.cni + - name: Include containerd role ansible.builtin.include_role: name: githubixx.containerd +- name: Setup Kubernetes controller + hosts: k8s_controller + become: true + gather_facts: true + tasks: + - name: Include kubernetes_controller role + ansible.builtin.include_role: + name: githubixx.kubernetes_controller + +- name: Prepare kubeconfig for vagrant user + hosts: k8s_assets + become: true + gather_facts: false + vars: + k8s_controller__vagrant_kube_directory: "/home/vagrant/.kube" + tasks: + - name: Ensure .kube directory in vagrant home + ansible.builtin.file: + path: "{{ k8s_controller__vagrant_kube_directory }}" + state: directory + mode: "0700" + owner: "vagrant" + group: "vagrant" + + - name: Copy admin.kubeconfig to vagrant home directory + ansible.builtin.copy: + src: "{{ k8s_admin_conf_dir }}/admin.kubeconfig" + dest: "{{ k8s_controller__vagrant_kube_directory }}/config" + mode: "0400" + remote_src: true + owner: "vagrant" + group: "vagrant" + - name: Setup Kubernetes worker hosts: k8s_worker become: true diff --git a/molecule/default/requirements.yml b/molecule/default/requirements.yml deleted file mode 100644 index 4e6efa5..0000000 --- a/molecule/default/requirements.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# Copyright (C) 2022 Robert Wimmer -# SPDX-License-Identifier: GPL-3.0-or-later - -roles: - - gantsign.helm - - githubixx.ansible_role_wireguard - - githubixx.harden_linux - - githubixx.cfssl - - githubixx.kubernetes_ca - - githubixx.etcd - - githubixx.kubectl - - githubixx.kubernetes_controller - - githubixx.kubernetes_worker - - githubixx.containerd - - githubixx.cilium_cli diff --git a/molecule/default/templates/values.yml.j2 b/molecule/default/templates/values.yml.j2 new file mode 100644 index 0000000..2aca0f8 --- /dev/null +++ b/molecule/default/templates/values.yml.j2 @@ -0,0 +1,49 @@ +# +# See https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/values.yaml +# for all possible values. +# +# All possible Jinja2 features/filters can be used but keep in mind that the +# result needs to be a valid YAML file. + +# The Helm option keepDeprecatedProbes=true will keep the exec probe in +# the new DaemonSet. +keepDeprecatedProbes: true + +# The eBPF-based masquerading implementation is the most efficient implementation. +# It requires Linux kernel >= 4.19. 
+# See: https://docs.cilium.io/en/stable/concepts/networking/masquerading/
+bpf:
+  masquerade: true
+
+# Keep "kube-proxy" functionality
+kubeProxyReplacement: "false"
+
+# Enable nodePort
+nodePort:
+  enabled: true
+  enableHealthCheck: false
+
+# Enable socketLB
+socketLB:
+  enabled: true
+
+# Use Cilium's IPAM. Every K8s node will get a "/24" for Pod IPs out of the
+# range specified in "clusterPoolIPv4PodCIDRList".
+ipam:
+  mode: "cluster-pool"
+  ciliumNodeUpdateRate: "15s"
+  operator:
+    clusterPoolIPv4PodCIDRList: ["10.100.0.0/16"]
+    clusterPoolIPv4MaskSize: 24
+
+cni:
+  chainingMode: portmap
+{% if cilium_etcd_enabled is defined and cilium_etcd_enabled == "true" -%}
+etcd:
+  enabled: true
+  endpoints:
+  {% for host in groups[cilium_etcd_nodes_group] -%}
+    - {% if cilium_etcd_secrets_name is defined %}https{% else %}http{% endif -%}://{{ hostvars[host]['ansible_' + cilium_etcd_interface].ipv4.address }}:{{ cilium_etcd_client_port }}
+  {% endfor -%}
+  ssl: {% if cilium_etcd_secrets_name is defined %}true{% else %}false{% endif -%}
+{% endif %}

diff --git a/tasks/install.yml b/tasks/install.yml
index 9ba18f8..6fecc2a 100644
--- a/tasks/install.yml
+++ b/tasks/install.yml
@@ -65,9 +65,12 @@
     vars:
       params:
         files:
+          - values.yml.j2
+          - values.yaml.j2
           - cilium_values_user.yml.j2
           - cilium_values_default.yml.j2
         paths:
+          - "{{ cilium_chart_values_directory }}"
           - templates

 - name: Install chart

diff --git a/tasks/template.yml b/tasks/template.yml
index 7ba4e17..cc5731e 100644
--- a/tasks/template.yml
+++ b/tasks/template.yml
@@ -23,9 +23,12 @@
     vars:
       params:
         files:
+          - values.yml.j2
+          - values.yaml.j2
           - cilium_values_user.yml.j2
           - cilium_values_default.yml.j2
         paths:
+          - "{{ cilium_chart_values_directory }}"
           - templates

 - name: Render template

diff --git a/tasks/upgrade.yml b/tasks/upgrade.yml
index b757cf0..4d3550e 100644
--- a/tasks/upgrade.yml
+++ b/tasks/upgrade.yml
@@ -175,9 +175,12 @@
     vars:
       params:
         files:
+          - values.yml.j2
+          - values.yaml.j2
           - cilium_values_user.yml.j2
           - cilium_values_default.yml.j2
         paths:
+          - "{{ cilium_chart_values_directory }}"
           - templates

 - name: Upgrade Helm chart
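The values-file resolution added to `tasks/install.yml`, `tasks/template.yml` and `tasks/upgrade.yml` above uses Ansible's `first_found` lookup. A minimal sketch of the effective lookup (the surrounding `set_fact` task and the fact name are illustrative; only the `files`/`paths` lists are taken from the diff):

```yaml
- name: Resolve Helm chart values template
  ansible.builtin.set_fact:
    cilium_values_template: "{{ lookup('ansible.builtin.first_found', params) }}"
  vars:
    params:
      files:                            # first match wins
        - values.yml.j2                 # user-supplied values file ...
        - values.yaml.j2                # ... in either spelling
        - cilium_values_user.yml.j2     # legacy user override
        - cilium_values_default.yml.j2  # role default as final fallback
      paths:
        - "{{ cilium_chart_values_directory }}"
        - templates
```

This is why a `values.yml.j2` placed in `cilium_chart_values_directory` takes precedence over the role's bundled `templates/cilium_values_default.yml.j2`.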