diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b166423
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,22 @@
+*
+!*.*
+!*/
+!LICENSE
+!README
+!Makefile
+!Dockerfile
+!Containerfile
+.vscode
+vendor/
+_artifacts/
+dist/
+*.swp
+*.cover
+.idea/
+*.bak
+.govc.env
+.deploy.env
+.workdir
+bin/govc
+mirantis.lic
+.tox
diff --git a/README.md b/README.md
index e69de29..9921189 100644
--- a/README.md
+++ b/README.md
@@ -0,0 +1,135 @@
+Virtualised MCC/MOSK for on-premise self-evaluation
+===================================================
+
+Introduction
+============
+The project is developed for demo purposes of the
+[MCC]()
+and [MOSK](https://docs.mirantis.com/mosk/latest/overview.html)
+products on top of Vsphere infrastructure.
+
+Prerequisites
+===============
+
+Network configuration
+---------------------
+Demo environment setup requires two dedicated networks assigned for the
+deployment:
+
+* LCM network. Used for MCC cluster setup (including machines provisioning)
+  and also to access the MCC services. From the MCC standpoint it is used as
+  a public network to download MCC artifacts,
+  so it should have access to the internet or to the proxy (if used).
+
+* Openstack network. Used to access virtual machines created on top of
+  the deployed Openstack cluster. The network should be routable in your infrastructure,
+  so you can access the Openstack VMs.
+
+**_NOTE:_** Vsphere networks must be configured with the following network policies:
+
+* Promiscuous mode: Accept
+* MAC address changes: Accept
+* Forged transmits: Accept
+
+User permissions
+----------------
+
+The Vsphere user manages the full installation of the MCC product onto your infrastructure
+and requires the following privileges:
+
+* Datastore
+* Distributed switch
+* Folder
+* Global
+* Host local operations
+* Network
+* Resource
+* Scheduled task
+* Sessions
+* Storage views
+* Tasks
+* Virtual machine
+
+Seed node
+---------
+The seed (or bootstrap) node is the initial node in an MCC deployment which holds the
+bootstrap cluster and MCC configuration. It is mandatory to prepare
+this seed node from the Ubuntu 22.04 image.
+You can download the official Ubuntu 22.04 `vmdk` image
+from the following [download page](https://cloud-images.ubuntu.com/releases/22.04/release/).
+You can upload the image directly to a dedicated Vsphere datastore and provide the path
+to it via the `VSPHERE_VMDK_IMAGE_DATASTORE_PATH` variable, or you can download
+the image locally and provide it via the `VSPHERE_VMDK_IMAGE_LOCAL_PATH` variable.
+
+The alternative (and less-preferred) way is to use an existing
+VM template of Ubuntu 22.04 with the latest version of cloud-init installed.
+The VM template can be provided via the `VSPHERE_VM_TEMPLATE` variable.
+Please specify the full path to the template to uniquely identify it in your Vsphere cluster.
+
+Get started
+===========
+
+Environment variables
+---------------------
+
+Run the following command to get detailed information about the script
+and the available commands and parameters:
+
+```./deploy.sh help```
+
+Minimal mandatory parameters
+----------------------------
+
+```
+VSPHERE_SERVER=""
+VSPHERE_USERNAME=""
+VSPHERE_PASSWORD=""
+VSPHERE_DATACENTER=""
+VSPHERE_DATASTORE="//datastore/"
+VSPHERE_RESOURCE_POOL="//host//Resources/"
+VSPHERE_VMDK_IMAGE_DATASTORE_PATH="/ubuntu-22.04-server-cloudimg-amd64.vmdk"
+VSPHERE_FOLDER="//vm//mcc"
+VSPHERE_SERVER_INSECURE="true"
+
+VSPHERE_NETWORK_LCM="//network/"
+NETWORK_LCM_SUBNET=172.16.10.0/24
+NETWORK_LCM_GATEWAY=172.16.10.1
+NETWORK_LCM_RANGE=172.16.10.2-172.16.10.100
+
+VSPHERE_NETWORK_OPENSTACK="//network/"
+NETWORK_OPENSTACK_SUBNET=172.16.20.0/24
+NETWORK_OPENSTACK_GATEWAY=172.16.20.1
+NETWORK_OPENSTACK_RANGE=172.16.20.2-172.16.20.100
+
+NTP_SERVERS=us.pool.ntp.org,pool.ntp.org
+NAMESERVERS=8.8.8.8,8.8.4.4
+```
+
+Proxy settings
+--------------
+
+```
+HTTP_PROXY=""
+HTTPS_PROXY=""
+NO_PROXY="" # should include vsphere fqdn and IP
+PROXY_CA_CERTIFICATE_PATH="//certificate.pem" # in case of MITM proxy
+```
+
+Deploy MCC environment
+----------------------
+
+MCC environment deployment includes:
+
+* seed node setup
+* creation and provisioning of management and child cluster machines
+* deployment of MCC management cluster
+* deployment of MCC child cluster:
+  * ceph deployment
+  * openstack deployment
+
+Each stage can be run separately. See `./deploy.sh help` for details.
+ +To deploy whole env with one command: + +```./deploy.sh all``` + diff --git a/bin/prepare_network.py b/bin/prepare_network.py new file mode 100644 index 0000000..17ad852 --- /dev/null +++ b/bin/prepare_network.py @@ -0,0 +1,66 @@ +import ipaddress +import sys + +print(sys.argv) + +if len(sys.argv) != 4: + raise Exception("prepare_network.py requires exactly 3 arguments") + +net_type = sys.argv[1] +net_range = sys.argv[2] +out_file = sys.argv[3] + +net_required_ranges = { + 'pxe': { + 'NETWORK_PXE_BRIDGE_IP': 1, + 'NETWORK_PXE_DHCP_RANGE': 10, + 'NETWORK_PXE_STATIC_RANGE_MGMT': 3, + 'NETWORK_PXE_METALLB_RANGE': 5 + }, + 'lcm': { + 'NETWORK_LCM_SEED_IP': 1, + 'NETWORK_LCM_MGMT_LB_HOST': 1, + 'NETWORK_LCM_METALLB_RANGE_MGMT': 15, + 'NETWORK_LCM_CHILD_LB_HOST': 1, + 'NETWORK_LCM_STATIC_RANGE_MGMT': 3, + 'NETWORK_LCM_METALLB_RANGE_CHILD': 7, + 'NETWORK_LCM_STATIC_RANGE_CHILD': 7, + 'NETWORK_LCM_METALLB_OPENSTACK_ADDRESS': 1 + } +} + +if not net_required_ranges.get(net_type, False): + raise Exception(f"unknown network type is provided: {net_type}") + +required_num_ips = 0 +for _, v in net_required_ranges[net_type].items(): + required_num_ips += v + +net_range_start, net_range_end = net_range.split('-')[0], net_range.split('-')[1] +ranges = ipaddress.summarize_address_range(ipaddress.IPv4Address(net_range_start), + ipaddress.IPv4Address(net_range_end)) +addresses = [] +for ir in ranges: + for ip in ir: + addresses.append(ip) + +if len(addresses) < required_num_ips: + raise Exception("Not enough IP addresses for deployment." + f"Required: {required_num_ips}. 
Provided: {len(addresses)}") + +result = {} +cur_index = 0 +for item, amount in net_required_ranges[net_type].items(): + if amount == 1: + result[item] = str(addresses[cur_index]) + else: + result[item] = str(f"{addresses[cur_index]}-{addresses[cur_index+amount-1]}") + + cur_index += amount + +f = open(out_file, 'w') +for k, v in result.items(): + f.write(f"export {k}={v}\n") +f.close() + +print(f"Ranges for {net_type} network were generated successfully") diff --git a/bin/prepare_seed_node.sh b/bin/prepare_seed_node.sh new file mode 100755 index 0000000..a9eb3a9 --- /dev/null +++ b/bin/prepare_seed_node.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash + +set -eux + +script_dir="$(dirname "${BASH_SOURCE[0]}")" +prep_seed_node_env_file="${script_dir}/.prepare_seed_node.env" +if [ -f "${prep_seed_node_env_file}" ]; then + # shellcheck source=/dev/null + chmod +x "${prep_seed_node_env_file}" && source "${prep_seed_node_env_file}" +fi + +HTTP_PROXY="${HTTP_PROXY:=}" +HTTPS_PROXY="${HTTPS_PROXY:=}" +NO_PROXY="${NO_PROXY:=}" +PROXY_CA_CERTIFICATE_PATH="${PROXY_CA_CERTIFICATE_PATH:=}" + +MCC_CDN_REGION="${MCC_CDN_REGION:=}" +MCC_CDN_BASE_URL="${MCC_CDN_BASE_URL:=}" +MCC_RELEASES_URL="${MCC_RELEASES_URL:=}" +SEED_NODE_USER="${SEED_NODE_USER:="mcc-user"}" +MCC_VERSION="${MCC_VERSION:=}" + +kaas_release_yaml="" +releases_dir="kaas-bootstrap/releases" + +# fail fast +if [ "${MCC_CDN_REGION}" != "public" ] && [ -z "${MCC_VERSION}" ]; then + echo "Error: MCC_VERSION must be provided for non-public cdn region" + exit 1 +fi + +sudo mkdir -p /etc/docker/ +cat << EOF > daemon.json +{ + "default-address-pools": + [ + {"base":"10.200.0.0/16","size":24} + ], + "proxies": { + "http-proxy": "${HTTP_PROXY}", + "https-proxy": "${HTTPS_PROXY}", + "no-proxy": "${NO_PROXY}" + } +} +EOF +sudo mv daemon.json /etc/docker/daemon.json + +apt_cmd="DEBIAN_FRONTEND=noninteractive apt-get" +if [ -n "${HTTP_PROXY}" ] || [ -n "${HTTPS_PROXY}" ]; then + apt_cmd="http_proxy=${HTTP_PROXY} 
https_proxy=${HTTPS_PROXY} ${apt_cmd}" + if [ -n "${NO_PROXY}" ]; then + apt_cmd="no_proxy=${NO_PROXY} ${apt_cmd}" + fi + if [ -n "${PROXY_CA_CERTIFICATE_PATH}" ]; then + sudo cp "${PROXY_CA_CERTIFICATE_PATH}" /usr/local/share/ca-certificates/ + sudo update-ca-certificates + fi +fi +apt_cmd="sudo ${apt_cmd}" + +${apt_cmd} update +${apt_cmd} install \ + arping bridge-utils docker.io golang-cfssl ipmitool net-tools tar traceroute wget -y +sudo usermod -aG docker "${SEED_NODE_USER}" + +function get_kaas_release_yaml { + kaas_release_yaml="$(find "${releases_dir}/kaas" -name "*.yaml" -type f)" + # Sanity check: only one kaas release file should exist there + if [ "$(echo "${kaas_release_yaml}" | wc -l)" -ne "1" ]; then + echo "Error: more than one yaml file is found in kaas releases folder" + exit 1 + fi + + echo "${kaas_release_yaml}" +} + +wget_cmd=$(which wget) +if [ -z "${wget_cmd}" ]; then + echo "Error: wget command is not found" + exit 1 +fi +wget_cmd="${wget_cmd} --tries 5 --no-verbose --show-progress --waitretry=15 --retry-connrefused" + +if [ -n "${HTTPS_PROXY}" ] || [ -n "${HTTP_PROXY}" ]; then + wget_proxy_optons="-e use_proxy=yes" + if [ -n "${HTTPS_PROXY}" ]; then + wget_proxy_optons="${wget_proxy_optons} -e https_proxy=${HTTPS_PROXY}" + fi + if [ -n "${HTTP_PROXY}" ]; then + wget_proxy_optons="${wget_proxy_optons} -e http_proxy=${HTTP_PROXY}" + fi + if [ -n "${NO_PROXY}" ]; then + wget_proxy_optons="${wget_proxy_optons} -e no_proxy=${NO_PROXY}" + fi + if [ -n "${PROXY_CA_CERTIFICATE_PATH}" ]; then + wget_proxy_optons="${wget_proxy_optons} --ca-certificate=${PROXY_CA_CERTIFICATE_PATH}" + fi + wget_cmd="${wget_cmd} ${wget_proxy_optons}" +fi + +yq_bin=$(which yq || true) +if [ -z "${yq_bin}" ]; then + os_tag=$(uname -s) + yq_bin_url="https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_${os_tag}_amd64" + yq_bin="/home/${SEED_NODE_USER}/yq" + ${wget_cmd} -O "${yq_bin}" "${yq_bin_url}" + chmod a+x "${yq_bin}" +fi + +if [ "${MCC_CDN_REGION}" == 
"public" ]; then + ${wget_cmd} https://binary.mirantis.com/releases/get_container_cloud.sh + chmod a+x get_container_cloud.sh + ./get_container_cloud.sh +else + kaas_release_yaml="kaas/${MCC_VERSION}.yaml" + mkdir -p ${releases_dir}/{kaas,cluster} + + pushd "${releases_dir}" || exit 1 + + # Donwload kaas release + ${wget_cmd} "${MCC_RELEASES_URL}/releases/${kaas_release_yaml}" -O "${kaas_release_yaml}" + + # Download cluster releases + for cr in $(${yq_bin} eval '.spec.supportedClusterReleases[].version' "${kaas_release_yaml}"); do + cr_file="cluster/${cr}.yaml" + ${wget_cmd} "${MCC_RELEASES_URL}/releases/${cr_file}" -O "${cr_file}" + done + + bootstrap_version="$(${yq_bin} eval '.spec.bootstrap.version' "${kaas_release_yaml}")" + + popd || exit 1 + + bootstrap_tarball_url="${MCC_CDN_BASE_URL}/core/bin/bootstrap-linux-${bootstrap_version}.tar.gz" + ${wget_cmd} --show-progress "${bootstrap_tarball_url}" + tar -xzf "$(basename "${bootstrap_tarball_url}")" -C kaas-bootstrap +fi + +if [ -z "${kaas_release_yaml}" ]; then + kaas_release_yaml=$(get_kaas_release_yaml) +fi +if [ -z "${MCC_VERSION}" ]; then + mcc_version="$(${yq_bin} eval '.spec.version' "${kaas_release_yaml}")" + # Return kaas version + echo "${mcc_version}" > "${script_dir}/mcc_version" +fi + +echo "export PATH=\$PATH:/home/${SEED_NODE_USER}/kaas-bootstrap/bin" >> "/home/${SEED_NODE_USER}/.bashrc" diff --git a/bin/render_template.py b/bin/render_template.py new file mode 100644 index 0000000..45d835c --- /dev/null +++ b/bin/render_template.py @@ -0,0 +1,5 @@ +import os +import sys +import jinja2 + +sys.stdout.write(jinja2.Template(sys.stdin.read()).render(os.environ)) diff --git a/bin/requirements.txt b/bin/requirements.txt new file mode 100644 index 0000000..8dc27a5 --- /dev/null +++ b/bin/requirements.txt @@ -0,0 +1,2 @@ +Jinja2==3.1.4 +ipaddress==1.0.23 diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 0000000..c76f7f6 --- /dev/null +++ b/deploy.sh @@ -0,0 +1,1569 @@ +#!/usr/bin/env bash + 
+set -eou pipefail + +# FIXME: stdout redirect breaks `read` logic +#exec > >(logger -s -t "$(basename "$0")") 2>&1 + +: "${MCC_DEMO_DEBUG:=}" +script_dir="$(dirname "${BASH_SOURCE[0]}")" +: "${ENV_FILE:="${script_dir}/.deploy.env"}" +work_dir="${script_dir}/.workdir" +mcc_version_file="${work_dir}/.mcc_version" +virtualenv_dir="${work_dir}/venv" +mkdir -p "${work_dir}" + +# Proxy variables +export HTTP_PROXY="${HTTP_PROXY:=}" +export HTTPS_PROXY="${HTTPS_PROXY:=}" +export NO_PROXY="${NO_PROXY:=}" +export PROXY_CA_CERTIFICATE_PATH="${PROXY_CA_CERTIFICATE_PATH:=}" + +# YQ variables +: "${YQ_VERSION:="4.13.0"}" + +function _print_header { + echo "########## Running stage: ${1} ##########" +} + +function set_vars { + _print_header "${FUNCNAME[0]}" + # Vsphere variables + collect_vsphere_vars + + # Seed node variables + export SEED_NODE_USER="${SEED_NODE_USER:="mcc-user"}" + export SEED_NODE_PXE_BRIDGE="${SEED_NODE_PXE_BRIDGE:="br0"}" + : "${SEED_NODE_CPU_NUM:=8}" + : "${SEED_NODE_MEMORY_MB:=16384}" + : "${SEED_NODE_DISK_SIZE:=30GiB}" + + # Network variables + _prepare_pxe_net_vars + _prepare_lcm_net_vars + _prepare_openstack_net_vars + + if [ -n "${HTTP_PROXY}" ] || [ -n "${HTTPS_PROXY}" ]; then + if ! [[ "${NO_PROXY}" =~ ${NETWORK_PXE_SUBNET} ]]; then + NO_PROXY="${NO_PROXY},${NETWORK_PXE_SUBNET}" + fi + if ! [[ "${NO_PROXY}" =~ ${NETWORK_LCM_SUBNET} ]]; then + NO_PROXY="${NO_PROXY},${NETWORK_LCM_SUBNET}" + fi + if ! 
[[ "${NO_PROXY}" =~ ${NETWORK_OPENSTACK_SUBNET} ]]; then + NO_PROXY="${NO_PROXY},${NETWORK_OPENSTACK_SUBNET}" + fi + fi + + export NTP_SERVERS="${NTP_SERVERS:=}" + export NAMESERVERS="${NAMESERVERS:=}" + if [ -z "${NAMESERVERS}" ]; then + echo "Error: NAMESERVERS must be provided" + exit 1 + fi + + # Machine variables + # Management cluster machines + : "${MGMT_MACHINES_CPU_NUM:=8}" + : "${MGMT_MACHINES_MEMORY_MB:=32768}" + : "${MGMT_MACHINES_DISK_SIZE:=150GiB}" + # Child cluster machines + : "${CHILD_WORKER_MACHINES_CPU_NUM:=8}" + : "${CHILD_CONTROL_MACHINES_CPU_NUM:=8}" + : "${CHILD_WORKER_MACHINES_MEMORY_MB:=24576}" + : "${CHILD_CONTROL_MACHINES_MEMORY_MB:=32768}" + # root disk + : "${CHILD_MACHINES_ROOT_DISK_SIZE:=80GiB}" + # Ceph disk + : "${CHILD_MACHINES_CEPH_DISK_SIZE:=40GiB}" + + # SSH variables + : "${SSH_PRIVATE_KEY_PATH:="${work_dir}/mcc_id_rsa"}" + : "${SSH_PUBLIC_KEY_PATH:="${work_dir}/mcc_id_rsa.pub"}" + + # Govc variables + : "${GOVC_FOLDER:="${script_dir}/bin"}" + : "${GOVC_BIN:="${GOVC_FOLDER}/govc"}" + : "${GOVC_BIN_VERSION:="v0.43.0"}" + : "${GOVC_BIN_OS_TAG:=}" + : "${GOVC_BIN_OS_ARCH:=}" + + # Timeout variables + : "${MGMT_CLUSTER_READINESS_TIMEOUT:=90}" + : "${CHILD_CLUSTER_READINESS_TIMEOUT:=90}" + : "${CHILD_CEPH_CLUSTER_TIMEOUT:=20}" + : "${OSDPL_APPLIED_TIMEOUT:=60}" + : "${OPENSTACK_READINESS_TIMEOUT:=90}" + : "${BMH_READINESS_TIMEOUT:=30}" + : "${IRONIC_DEPLOYMENT_TIMEOUT:=30}" + + # MCC global variables + export MCC_CDN_REGION="${MCC_CDN_REGION:="public"}" + export MCC_CDN_BASE_URL="${MCC_CDN_BASE_URL:=}" + export MCC_RELEASES_URL="${MCC_RELEASES_URL:=}" + + if [ -z "${MCC_CDN_BASE_URL}" ]; then + case "${MCC_CDN_REGION}" in + internal-ci ) + MCC_CDN_BASE_URL="https://artifactory.mcp.mirantis.net/artifactory/binary-dev-kaas-virtual" + ;; + internal-eu ) + MCC_CDN_BASE_URL="https://artifactory-eu.mcp.mirantis.net/artifactory/binary-dev-kaas-virtual" + ;; + public-ci ) + MCC_CDN_BASE_URL="https://binary-dev-kaas-virtual.mcp.mirantis.com" 
+ ;; + public ) + MCC_CDN_BASE_URL="https://binary.mirantis.com" + ;; + * ) + die "Unknown CDN region: ${MCC_CDN_REGION}" + ;; + esac + fi + + # MCC management cluster variables + export MCC_MGMT_CLUSTER_NAME="${MCC_MGMT_CLUSTER_NAME:="mcc-mgmt"}" + export MCC_SERVICEUSER_PASSWORD="${MCC_SERVICEUSER_PASSWORD:=}" + : "${MCC_LICENSE_FILE:="mirantis.lic"}" + + # MCC child cluster variables + export MCC_CHILD_CLUSTER_NAME="${MCC_CHILD_CLUSTER_NAME:="mcc-child"}" + export MCC_CHILD_CLUSTER_NAMESPACE="${MCC_CHILD_CLUSTER_NAMESPACE:="child-ns"}" + export MCC_CHILD_CLUSTER_RELEASE="${MCC_CHILD_CLUSTER_RELEASE:=""}" + export MCC_CHILD_OPENSTACK_RELEASE="${MCC_CHILD_OPENSTACK_RELEASE:="antelope"}" + export MCC_OPENSTACK_PUBLIC_DOMAIN="${MCC_OPENSTACK_PUBLIC_DOMAIN:="it.just.works"}" + + # ================================================================================== + # Internal variables mostly: + + # Skip step variables + : "${SKIP_GOVC_DOWNLOAD:=}" + : "${SKIP_VSPHERE_VMS_CREATION:=}" + : "${SKIP_SEED_NODE_CREATION:="false"}" + : "${SKIP_SEED_NODE_SETUP:="false"}" + + if [[ -z "${NETWORK_LCM_SEED_IP}" ]] && [[ "${SKIP_SEED_NODE_CREATION}" =~ [Tt]rue ]]; then + echo "Error: NETWORK_LCM_SEED_IP must be set if SKIP_SEED_NODE_CREATION is set to true" + exit 1 + fi + + # MCC_VERSION should be used for internal deployments or for standalone stages run only + : "${MCC_VERSION:=}" + : "${APPLY_COREDNS_HACK:="true"}" + : "${VM_NAME_PREFIX:=""}" +} + +function _prepare_pxe_net_vars() { + local out_file="${work_dir}/pxe-net.out" + export NETWORK_PXE_SUBNET="${NETWORK_PXE_SUBNET:="10.0.0.0/26"}" + export NETWORK_PXE_RANGE="${NETWORK_PXE_RANGE:="10.0.0.2-10.0.0.60"}" + python_exec "${script_dir}/bin/prepare_network.py" pxe "${NETWORK_PXE_RANGE}" "${out_file}" + # shellcheck source=/dev/null + chmod +x "${out_file}" && source "${out_file}" + export NETWORK_PXE_BRIDGE_IP \ + NETWORK_PXE_DHCP_RANGE \ + NETWORK_PXE_STATIC_RANGE_MGMT \ + NETWORK_PXE_METALLB_RANGE +} + +function 
_prepare_lcm_net_vars() { + local out_file="${work_dir}/lcm-net.out" + export NETWORK_LCM_SUBNET="${NETWORK_LCM_SUBNET:=}" + export NETWORK_LCM_GATEWAY="${NETWORK_LCM_GATEWAY:=}" + + if [ -z "${NETWORK_LCM_SUBNET}" ] || \ + [ -z "${NETWORK_LCM_GATEWAY}" ]; then + echo "Error: some LCM network variables are not set, but mandatory: + NETWORK_LCM_SUBNET: ${NETWORK_LCM_SUBNET} + NETWORK_LCM_GATEWAY: ${NETWORK_LCM_GATEWAY} + " + fi + + export NETWORK_LCM_RANGE="${NETWORK_LCM_RANGE:=}" + if [ -n "${NETWORK_LCM_RANGE}" ]; then + python_exec "${script_dir}/bin/prepare_network.py" lcm "${NETWORK_LCM_RANGE}" "${out_file}" + # shellcheck source=/dev/null + chmod +x "${out_file}" && source "${out_file}" + else + # If NETWORK_LCM_RANGE is not provided, we expect to get more detailed input + if [ -z "${NETWORK_LCM_SEED_IP}" ] || \ + [ -z "${NETWORK_LCM_MGMT_LB_HOST}" ] || \ + [ -z "${NETWORK_LCM_CHILD_LB_HOST}" ] || \ + [ -z "${NETWORK_LCM_METALLB_RANGE_MGMT}" ] || \ + [ -z "${NETWORK_LCM_STATIC_RANGE_MGMT}" ] || \ + [ -z "${NETWORK_LCM_METALLB_RANGE_CHILD}" ] || \ + [ -z "${NETWORK_LCM_STATIC_RANGE_CHILD}" ] || \ + [ -z "${NETWORK_LCM_METALLB_OPENSTACK_ADDRESS}" ]; then + echo "Error: some LCM network variables are not set, but mandatory: + NETWORK_LCM_SUBNET: ${NETWORK_LCM_SUBNET} + NETWORK_LCM_GATEWAY: ${NETWORK_LCM_GATEWAY} + NETWORK_LCM_MGMT_LB_HOST: ${NETWORK_LCM_MGMT_LB_HOST} + NETWORK_LCM_MGMT_LB_HOST: ${NETWORK_LCM_MGMT_LB_HOST} + NETWORK_LCM_CHILD_LB_HOST: ${NETWORK_LCM_CHILD_LB_HOST} + NETWORK_LCM_METALLB_RANGE_MGMT: ${NETWORK_LCM_METALLB_RANGE_MGMT} + NETWORK_LCM_METALLB_RANGE_CHILD: ${NETWORK_LCM_METALLB_RANGE_CHILD} + NETWORK_LCM_STATIC_RANGE_CHILD: ${NETWORK_LCM_STATIC_RANGE_CHILD} + NETWORK_LCM_METALLB_OPENSTACK_ADDRESS: ${NETWORK_LCM_METALLB_OPENSTACK_ADDRESS} + " + exit 1 + fi + fi + + export NETWORK_LCM_SEED_IP="${NETWORK_LCM_SEED_IP:=}" + export NETWORK_LCM_MGMT_LB_HOST="${NETWORK_LCM_MGMT_LB_HOST:=}" + export 
NETWORK_LCM_CHILD_LB_HOST="${NETWORK_LCM_CHILD_LB_HOST:=}" + export NETWORK_LCM_METALLB_RANGE_MGMT="${NETWORK_LCM_METALLB_RANGE_MGMT:=}" + export NETWORK_LCM_STATIC_RANGE_MGMT="${NETWORK_LCM_STATIC_RANGE_MGMT:=}" + export NETWORK_LCM_METALLB_RANGE_CHILD="${NETWORK_LCM_METALLB_RANGE_CHILD:=}" + export NETWORK_LCM_STATIC_RANGE_CHILD="${NETWORK_LCM_STATIC_RANGE_CHILD:=}" + export NETWORK_LCM_METALLB_OPENSTACK_ADDRESS="${NETWORK_LCM_METALLB_OPENSTACK_ADDRESS:=}" +} + +function _prepare_openstack_net_vars() { + export NETWORK_OPENSTACK_SUBNET="${NETWORK_OPENSTACK_SUBNET:=}" + export NETWORK_OPENSTACK_GATEWAY="${NETWORK_OPENSTACK_GATEWAY:=}" + export NETWORK_OPENSTACK_RANGE="${NETWORK_OPENSTACK_RANGE:=}" + + if [ -z "${NETWORK_OPENSTACK_SUBNET}" ] || \ + [ -z "${NETWORK_OPENSTACK_GATEWAY}" ] || \ + [ -z "${NETWORK_OPENSTACK_RANGE}" ]; then + echo "Error: some Openstack network variables are not set, but mandatory: + NETWORK_OPENSTACK_SUBNET: ${NETWORK_OPENSTACK_SUBNET} + NETWORK_OPENSTACK_GATEWAY: ${NETWORK_OPENSTACK_GATEWAY} + NETWORK_OPENSTACK_RANGE: ${NETWORK_OPENSTACK_RANGE} + " + exit 1 + fi + + export network_openstack_range_start network_openstack_range_end + network_openstack_range_start="$(echo "${NETWORK_OPENSTACK_RANGE}" | cut -d '-' -f 1)" + network_openstack_range_end="$(echo "${NETWORK_OPENSTACK_RANGE}" | cut -d '-' -f 2)" +} + +function usage() { + echo "Usage: deploy.sh" + echo "" + echo "Available commands:" + echo "" + echo " all starts MCC environment deployment:" + echo " 1. create_seed_vm" + echo " 2. create_mgmt_cluster_vms" + echo " 3. create_child_cluster_vms" + echo " 4. prepare_mgmt_cluster_templates" + echo " 5. setup_bootstrap_cluster" + echo " 6. prepare_child_cluster_templates" + echo " 7. deploy_mgmt_cluster" + echo " 8. deploy_child_cluster" + echo " 9. deploy_openstack" + echo " 10. 
apply_coredns_hack" + echo " create_seed_vm creates seed node VM" + echo " setup_bootstrap_cluster creates a Kind bootstrap cluster on seed node" + echo " create_mgmt_cluster_vms creates a set of VMs for management cluster" + echo " create_child_cluster_vms creates a set if VMs for child cluster" + echo " prepare_mgmt_cluster_templates renders k8s objects templates for management cluster deployment. Result is stored in ${work_dir}/templates/management" + echo " prepare_child_cluster_templates renders k8s objects templates for child cluster deployment. Result is stored in ${work_dir}/templates/child" + echo " deploy_mgmt_cluster deploys management cluster by applying rendered k8s objects YAMLs (after 'prepare_mgmt_cluster_templates' action)" + echo " deploy_child_cluster deploys child cluster by applying rendered k8s objects YAMLs (after 'prepare_child_cluster_templates' action)" + echo " apply_coredns_hack apply hack for coredns on child cluster. Note: custom Openstack hostnames have to be resolved inside child cluster," + echo " otherwise the Openstack endpoints are not accessible. If user adds Openstack endpoints to the DNS, the hack is not needed" + echo " cleanup cleanup VMs from the provided folder on Vsphere" + echo " cleanup_bootstrap_cluster cleanup bootstrap cluster from seed node. Useful when management cluster deployment" + echo " is required to be restarted from scratch" + echo " help shows this help message" + echo "" + echo "Required binaries:" + echo " curl" + echo " jq" + echo " mktemp" + echo " python3" + echo " scp" + echo " ssh" + echo " ssh-keygen" + echo " tar" + echo " virtualenv" + echo "" + echo "Supported environment variables:" + echo "" + echo " Common variables:" + echo " MCC_DEMO_DEBUG whether to enable debug logging for scripts" + echo " ENV_FILE file with environment vairalbes for script" + echo "" + echo " Vsphere variables:" + echo " VSPHERE_SERVER Vsphere server fqdn or ip" + echo " VSPHERE_SERVER_PORT Port to access Vsphere API. 
Default is 443" + echo " VSPHERE_SERVER_PROTOCOL Protocol to access Vsphere API. Default is https" + echo " VSPHERE_SERVER_INSECURE Whether to ignore Vsphere server ssl certificate" + echo " VSPHERE_USERNAME User name to access Vsphere API" + echo " VSPHERE_PASSWORD User password to access Vsphere API" + echo " VSPHERE_DATACENTER Vsphere datacenter name" + echo " VSPHERE_DATASTORE Vsphere datastore full path (preferred) or name. Example //datastore/" + echo " VSPHERE_DATASTORE_MGMT_CLUSTER Vsphere datastore full path (preferred) or name for management cluster machines. Example //datastore/" + echo " VSPHERE_DATASTORE_CHILD_CLUSTER Vsphere datastore full path (preferred) or name for child cluster machines. Example //datastore/" + echo " VSPHERE_NETWORK_LCM Vsphere network full path (preferred) or name fosr MCC. Example //network/" + echo " VSPHERE_NETWORK_OPENSTACK Vsphere network full path (preferred) or name for Openstack. Example //network/" + echo " VSPHERE_RESOURCE_POOL Vsphere resource pool full path (preferred) or name. Example //host//Resources/" + echo " VSPHERE_FOLDER Vsphere folder pool full path (preferred) or name to place VMs. Defaults to //vm/mcc" + echo " VSPHERE_VMDK_IMAGE_DATASTORE_PATH Path to Ubuntu 22.04 vmdk image on datastore (preferred over VSPHERE_VM_TEMPLATE)" + echo " VSPHERE_VMDK_IMAGE_LOCAL_PATH Local path to Ubuntu 22.04 vmdk image (preferred over VSPHERE_VM_TEMPLATE). Image will be uploaded to datastore by the script" + echo " VSPHERE_VM_TEMPLATE Full path (preferred) or name of Ubuntu 22.04 VM template on Vsphere" + echo "" + echo " Network variables:" + echo "" + echo " NAMESERVERS Comma-separated list of nameservers for MCC clusters" + echo " NTP_SERVERS Comma-separated list of ntp servers for MCC clusters" + echo "" + echo " LCM (MCC control) network:" + echo " NETWORK_LCM_SUBNET CIDR of LCM network. 
Example 172.16.10.0/24" + echo " NETWORK_LCM_GATEWAY Gateway of LCM network" + echo " NETWORK_LCM_RANGE Range from LCM network which can be used for MCC deployment. Range will be automatically splitted" + echo " for MCC need. Minimal required number of addresses - 40. Example 172.16.10.10-172.16.10.50." + echo " If you want to manually allocated IPs use parameters NETWORK_LCM variables below" + echo "" + echo " NETWORK_LCM_SEED_IP Seed node address" + echo " NETWORK_LCM_MGMT_LB_HOST Load balancer address for MCC management cluster" + echo " NETWORK_LCM_CHILD_LB_HOST Load balancer address for MCC child cluster" + echo " NETWORK_LCM_METALLB_RANGE_MGMT Metallb address range for MCC management cluster" + echo " NETWORK_LCM_STATIC_RANGE_MGMT Address range for MCC management cluster nodes" + echo " NETWORK_LCM_METALLB_RANGE_CHILD Metallb address range for MCC child cluster" + echo " NETWORK_LCM_STATIC_RANGE_CHILD Address range for MCC child cluster nodes" + echo " NETWORK_LCM_METALLB_OPENSTACK_ADDRESS Adress for Openstack services" + echo "" + echo " Openstack network:" + echo " NETWORK_OPENSTACK_SUBNET CIDR of Openstack network. Example 172.16.20.0/24" + echo " NETWORK_OPENSTACK_GATEWAY Gateway of Openstack network" + echo " NETWORK_OPENSTACK_RANGE Range from Openstack network. Minimal required number of addresses - 5. Example 172.16.20.10-172.16.20.50." + echo "" + echo " PXE network (override only if defaults are not suitable):" + echo " NETWORK_PXE_SUBNET CIDR of Openstack network. Default is 10.0.0.0/26" + echo " NETWORK_PXE_RANGE Range from PXE network. Default is 10.0.0.2-10.0.0.60" + echo "" + echo " Machine variables:" + echo " SEED_NODE_CPU_NUM Seed node CPU num. Default is 8" + echo " SEED_NODE_MEMORY_MB Seed node RAM in MB. Default is 16384" + echo " SEED_NODE_DISK_SIZE Seed node disk size. Default is 30GiB" + echo " SEED_NODE_USER User name to access seed node via ssh. Default is 'mcc-user'" + echo " SEED_NODE_PXE_BRIDGE PXE bridge name for MCC setup. 
Default is 'br0'" + echo " MGMT_MACHINES_MEMORY_MB Management cluster machines RAM in MB. Default is 32768" + echo " MGMT_MACHINES_CPU_NUM Management cluster machines CPU. Default is 8" + echo " MGMT_MACHINES_DISK_SIZE Management cluster machines disk size. Default is 150GiB" + echo " CHILD_CONTROL_MACHINES_CPU_NUM Child cluster control machines CPU num. Default is 8" + echo " CHILD_CONTROL_MACHINES_MEMORY_MB Child cluster control machines RAM in MB. Default is 32768" + echo " CHILD_WORKER_MACHINES_CPU_NUM Child cluster worker machines CPU num. Default is 8" + echo " CHILD_WORKER_MACHINES_MEMORY_MB Child cluster worker machines RAM in MB. Default is 24576" + echo " CHILD_MACHINES_ROOT_DISK_SIZE Child cluster machines disk size for /root partition. Default is 80GiB" + echo " CHILD_MACHINES_CEPH_DISK_SIZE Child cluster machines disk size for ceph. Default is 40GiB" + echo "" + echo " MCC variables:" + echo " MCC_MGMT_CLUSTER_NAME Name for MCC management cluster. Default is mcc-mgmt" + echo " MCC_SERVICEUSER_PASSWORD 'serviceuser' password to access MCC management cluster web UI. Default is auto-generated" + echo " MCC_LICENSE_FILE Local path to MCC licence file" + echo " MCC_CHILD_CLUSTER_NAME Name for MCC child cluster. Default is mcc-child" + echo " MCC_CHILD_CLUSTER_NAMESPACE Namespace where MCC child cluster is going to be created. Defaults is child-ns" + echo " MCC_CHILD_CLUSTER_RELEASE Cluster release for MCC child cluster. Default is auto-selected" + echo " MCC_CHILD_OPENSTACK_RELEASE Openstack release for MCC child cluster. 
Default is auto-selected" + echo " MCC_OPENSTACK_PUBLIC_DOMAIN Public domain for Openstack" + echo "" + echo " Proxy variables:" + echo " HTTP_PROXY HTTP proxy" + echo " HTTPS_PROXY HTTPS proxy" + echo " NO_PROXY Comma-separated list of IPs/FQDNs which should be accessible without proxy" + echo " PROXY_CA_CERTIFICATE_PATH Proxy certificate path (for MITM proxy)" + echo "" + echo " SSH variables" + echo " SSH_PRIVATE_KEY_PATH Path to private ssh key to access Seed node and MCC cluster machines." + echo " If empty, the new ssh key pair will be generated" + echo " SSH_PUBLIC_KEY_PATH Path to public ssh key" + echo "" + echo " Govc variables:" + echo " GOVC_BIN Path to exiting govc binary. Leave empty, so the binary will be downloaded by script" + echo " GOVC_BIN_VERSION Govc binary version" + echo " GOVC_BIN_OS_TAG Govc OS tag (darwin,linux)" + echo " GOVC_BIN_OS_ARCH Govc OS arch (x86_64,arm64)" + echo " GOVC_FOLDER Folder where govc binary is downloaded" + echo "" + echo " Timeout variables (minutes):" + echo " MGMT_CLUSTER_READINESS_TIMEOUT Time to wait for mgmt cluster object readiness. Default: 90 (min)" + echo " MCC artifacts (container images, helm charts etc.) may take more time to download" + echo " on a poor Internet connection (f.e. via proxy)" + echo " CHILD_CLUSTER_READINESS_TIMEOUT Time to wait for child cluster object readiness. Default: 90 (min)" + echo " CHILD_CEPH_CLUSTER_TIMEOUT Time to wait for kaascephcluster object readiness on child. Default: 20 (min)" + echo " OSDPL_APPLIED_TIMEOUT Time to wait for osdpl object state 'APPLIED'. Default: 60 (min)" + echo " OPENSTACK_READINESS_TIMEOUT Time to wait for the all openstack components readiness. Default: 90 (min)" + echo " BMH_READINESS_TIMEOUT Time to wait for all baremetalhosts (per cluster) became 'available' or 'provisioned'. Default: 30 (min)" + echo " IRONIC_DEPLOYMENT_TIMEOUT Time to wait for ironic deployment readiness. 
Default: 30 (min)"
    echo " Provisioning artifacts (OS images, kernels, initramfs etc) may take more time to download"
    echo " on a poor Internet connection (f.e. via proxy)"
    echo ""
}

# Locate the CLI tools the script depends on, wrap ssh/scp with
# non-interactive options, and install python requirements into a
# dedicated virtualenv (honouring an optional proxy CA for pip).
function verify_binaries {
    case "$(uname -s)" in
        Linux*) base64_encode_cmd="base64 -w 0";;
        Darwin*) base64_encode_cmd="base64";;
        *) die "Unexpected system: $(uname -s)"
    esac
    curl_bin=$(which curl)
    ssh_bin=$(which ssh)
    ssh_bin="${ssh_bin} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
    scp_bin=$(which scp)
    scp_bin="${scp_bin} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
    ssh_keygen_bin=$(which ssh-keygen)
    tar_bin=$(which tar)
    mktemp_bin=$(which mktemp)
    jq_bin=$(which jq)

    virtualenv_bin=$(which virtualenv)
    ${virtualenv_bin} "${virtualenv_dir}"
    set +u
    # shellcheck source=/dev/null
    source "${virtualenv_dir}/bin/activate"
    set -u
    if { [ -n "${HTTP_PROXY}" ] || [ -n "${HTTPS_PROXY}" ] ;} && [ -n "${PROXY_CA_CERTIFICATE_PATH}" ]; then
        export PIP_CERT="${PROXY_CA_CERTIFICATE_PATH}"
    fi
    pip3 install -r "${script_dir}/bin/requirements.txt"
    deactivate

    echo "Basic binaries have been verified"
}

# Run a python command inside the prepared virtualenv.
function python_exec {
    # shellcheck source=/dev/null
    source "${virtualenv_dir}/bin/activate"
    python3 "$@"
    deactivate
}

# Render a template from stdin to stdout; values come from exported env vars.
function render_template {
    python_exec "${script_dir}/bin/render_template.py"
}

# Default, validate and normalize all VSPHERE_* input variables and
# generate ${work_dir}/govc.env with the govc connection settings.
function collect_vsphere_vars {
    export VSPHERE_SERVER="${VSPHERE_SERVER:=}"
    export VSPHERE_SERVER_PORT="${VSPHERE_SERVER_PORT:="443"}"
    export VSPHERE_SERVER_PROTOCOL="${VSPHERE_SERVER_PROTOCOL:="https"}"
    export VSPHERE_SERVER_INSECURE="${VSPHERE_SERVER_INSECURE:="false"}"
    export VSPHERE_USERNAME="${VSPHERE_USERNAME:=}"
    export VSPHERE_PASSWORD="${VSPHERE_PASSWORD:=}"
    : "${VSPHERE_DATACENTER:=}"
    : "${VSPHERE_DATASTORE:=}"
    # Per-cluster datastores default to the common one when given
    : "${VSPHERE_DATASTORE_MGMT_CLUSTER:="${VSPHERE_DATASTORE}"}"
    : "${VSPHERE_DATASTORE_CHILD_CLUSTER:="${VSPHERE_DATASTORE}"}"
    : "${VSPHERE_NETWORK_LCM:=}"
    : "${VSPHERE_NETWORK_OPENSTACK:=}"
    : "${VSPHERE_RESOURCE_POOL:=}"
    : "${VSPHERE_FOLDER:="${VSPHERE_DATACENTER}/vm/mcc"}"
    : "${VSPHERE_VM_TEMPLATE:=}"
    : "${VSPHERE_VMDK_IMAGE_LOCAL_PATH:=}"
    : "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH:=}"

    if [ -z "${VSPHERE_SERVER}" ] \
        || [ -z "${VSPHERE_USERNAME}" ] \
        || [ -z "${VSPHERE_PASSWORD}" ] \
        || [ -z "${VSPHERE_DATACENTER}" ] \
        || [ -z "${VSPHERE_NETWORK_LCM}" ] \
        || [ -z "${VSPHERE_NETWORK_OPENSTACK}" ] \
        || [ -z "${VSPHERE_RESOURCE_POOL}" ]; then
        echo "Error: some vsphere vars are not provided:"
        echo " VSPHERE_SERVER: ${VSPHERE_SERVER}"
        echo " VSPHERE_USERNAME: ${VSPHERE_USERNAME}"
        echo " VSPHERE_PASSWORD: ${VSPHERE_PASSWORD}"
        echo " VSPHERE_DATACENTER: ${VSPHERE_DATACENTER}"
        echo " VSPHERE_NETWORK_LCM: ${VSPHERE_NETWORK_LCM}"
        echo " VSPHERE_NETWORK_OPENSTACK: ${VSPHERE_NETWORK_OPENSTACK}"
        echo " VSPHERE_RESOURCE_POOL: ${VSPHERE_RESOURCE_POOL}"
        echo " VSPHERE_VM_TEMPLATE: ${VSPHERE_VM_TEMPLATE}"
        exit 1
    fi
    # Exactly one of the three image sources must be set
    if [ -z "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH}" ] \
        && [ -z "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}" ] \
        && [ -z "${VSPHERE_VM_TEMPLATE}" ]; then
        echo "Error: Vsphere VM image has to be provided via one of the following variables:
 VSPHERE_VMDK_IMAGE_DATASTORE_PATH
 VSPHERE_VMDK_IMAGE_LOCAL_PATH
 VSPHERE_VM_TEMPLATE"
        exit 1
    fi

    if [ -z "${VSPHERE_DATASTORE}" ]; then
        if [ -z "${VSPHERE_DATASTORE_MGMT_CLUSTER}" ]; then
            echo "VSPHERE_DATASTORE_MGMT_CLUSTER or VSPHERE_DATASTORE must be provided"
            exit 1
        fi
        if [ -z "${VSPHERE_DATASTORE_CHILD_CLUSTER}" ]; then
            echo "VSPHERE_DATASTORE_CHILD_CLUSTER or VSPHERE_DATASTORE must be provided"
            exit 1
        fi
    fi

    echo "Vsphere variables have been verified"

    # Ensure some vsphere objects are provided by full path, not just name
    if ! [[ "${VSPHERE_FOLDER}" =~ ^/.* ]]; then
        VSPHERE_FOLDER="/${VSPHERE_DATACENTER}/vm/${VSPHERE_FOLDER}"
    fi

    if ! [[ "${VSPHERE_NETWORK_LCM}" =~ ^/.* ]]; then
        VSPHERE_NETWORK_LCM="/${VSPHERE_DATACENTER}/network/${VSPHERE_NETWORK_LCM}"
    fi

    if ! [[ "${VSPHERE_NETWORK_OPENSTACK}" =~ ^/.* ]]; then
        VSPHERE_NETWORK_OPENSTACK="/${VSPHERE_DATACENTER}/network/${VSPHERE_NETWORK_OPENSTACK}"
    fi

    cat << EOF > "${work_dir}/govc.env"
#!/bin/bash
export GOVC_URL=${VSPHERE_SERVER_PROTOCOL}://${VSPHERE_SERVER}:${VSPHERE_SERVER_PORT}
export GOVC_USERNAME=${VSPHERE_USERNAME}
export GOVC_PASSWORD=${VSPHERE_PASSWORD}
EOF
    if [[ "${VSPHERE_SERVER_INSECURE}" =~ [Tt]rue ]]; then
        echo "export GOVC_INSECURE=true" >> "${work_dir}/govc.env"
    fi

    # shellcheck source=/dev/null
    chmod +x "${work_dir}/govc.env" && source "${work_dir}/govc.env"
}

# curl wrapper that injects proxy / CA options when a proxy is configured.
# NOTE(review): when both HTTP_PROXY and HTTPS_PROXY are set, the HTTPS
# value wins (the second assignment overwrites the first).
function _curl {
    local curl_cmd="${curl_bin}"
    if [ -n "${HTTP_PROXY}" ] || [ -n "${HTTPS_PROXY}" ]; then
        if [ -n "${HTTP_PROXY}" ]; then
            curl_cmd="${curl_bin} -x ${HTTP_PROXY}"
        fi
        if [ -n "${HTTPS_PROXY}" ]; then
            curl_cmd="${curl_bin} -x ${HTTPS_PROXY}"
        fi
        if [ -n "${NO_PROXY}" ]; then
            curl_cmd="${curl_cmd} --noproxy ${NO_PROXY}"
        fi
        if [ -n "${PROXY_CA_CERTIFICATE_PATH}" ]; then
            curl_cmd="${curl_cmd} --cacert ${PROXY_CA_CERTIFICATE_PATH}"
        fi
    fi

    ${curl_cmd} "$@"
}

# Download the govc CLI from the govmomi GitHub releases unless
# SKIP_GOVC_DOWNLOAD is set (in which case GOVC_BIN must already exist).
function ensure_govc_lib {
    if [[ "${SKIP_GOVC_DOWNLOAD}" =~ [Tt]rue ]]; then
        if ! [ -f "${GOVC_BIN}" ]; then
            echo "Error: govc binary download is skipped, but GOVC_BIN is not provided"
            exit 1
        else
            echo "GOVC is already in place: ${GOVC_BIN}"
            return
        fi
    fi

    if [ -z "${GOVC_BIN_OS_TAG}" ]; then
        GOVC_BIN_OS_TAG=$(uname -s)
    fi
    if [ -z "${GOVC_BIN_OS_ARCH}" ]; then
        GOVC_BIN_OS_ARCH=$(uname -m)
    fi

    local govc_bin_download_url govc_archive_name tempdir
    govc_bin_download_url="https://github.com/vmware/govmomi/releases/download/${GOVC_BIN_VERSION}/govc_${GOVC_BIN_OS_TAG}_${GOVC_BIN_OS_ARCH}.tar.gz"
    govc_archive_name=$(basename "${govc_bin_download_url}")
    tempdir=$("${mktemp_bin}" -d)

    _curl -fL -o "${tempdir}/${govc_archive_name}" "${govc_bin_download_url}"

    ${tar_bin} xzf "${tempdir}/${govc_archive_name}" -C "${tempdir}"
    mkdir -p "${GOVC_FOLDER}"
    mv "${tempdir}/govc" "${GOVC_FOLDER}"
    rm -rf "${tempdir}"
}

# Smoke-test the vSphere connection and every object the deployment uses.
function verify_vsphere_objects {
    # Verify credentials
    ${GOVC_BIN} about
    # Verify objects:
    # 1. Datastore
    if [ -n "${VSPHERE_DATASTORE}" ]; then
        ${GOVC_BIN} datastore.info "${VSPHERE_DATASTORE}"
    fi
    if [ -n "${VSPHERE_DATASTORE_MGMT_CLUSTER}" ] && [ "${VSPHERE_DATASTORE_MGMT_CLUSTER}" != "${VSPHERE_DATASTORE}" ]; then
        ${GOVC_BIN} datastore.info "${VSPHERE_DATASTORE_MGMT_CLUSTER}"
    fi
    if [ -n "${VSPHERE_DATASTORE_CHILD_CLUSTER}" ] && [ "${VSPHERE_DATASTORE_CHILD_CLUSTER}" != "${VSPHERE_DATASTORE}" ]; then
        ${GOVC_BIN} datastore.info "${VSPHERE_DATASTORE_CHILD_CLUSTER}"
    fi
    # 2. Networks
    ${GOVC_BIN} object.collect "${VSPHERE_NETWORK_LCM}"
    ${GOVC_BIN} object.collect "${VSPHERE_NETWORK_OPENSTACK}"
    # 3. Resource pool
    ${GOVC_BIN} pool.info "${VSPHERE_RESOURCE_POOL}"
    # 4. Folder (create if does not exist)
    ${GOVC_BIN} folder.create "${VSPHERE_FOLDER}" || true
    # 5. VM image
VM image + if [ -n "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH}" ]; then + ${GOVC_BIN} datastore.ls -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH}" + elif [ -n "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}" ]; then + if ! [[ "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}" =~ .*\.vmdk ]]; then + echo "Error: only VMDK image is supported" + exit 1 + fi + + file "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}" + else + ${GOVC_BIN} vm.info "${VSPHERE_VM_TEMPLATE}" + fi + + echo "Vsphere access has been verified" +} + +function verify_mcc_vars { + if [ "${MCC_CDN_REGION}" != "public" ] && [ -z "${MCC_RELEASES_URL}" ]; then + echo "Error: MCC_RELEASES_URL must be provided for non-public CDN region" + exit 1 + fi + if [ "${MCC_CDN_REGION}" != "public" ] && [ -z "${MCC_VERSION}" ]; then + echo "Error: MCC_VERSION must be provided for non-public CDN region" + exit 1 + fi + if [ -z "${MCC_LICENSE_FILE}" ] || ! [ -f "${MCC_LICENSE_FILE}" ]; then + echo "Error MCC_LICENSE_FILE is not found" + exit 1 + fi +} + +function _set_tmpl_file_vars { + # TODO: implement templating for userdata/metadata, e.g. jinja + seed_userdata_file="${work_dir}/userdata.yaml" + seed_userdata_file_tmpl="${script_dir}/userdata.yaml.tmpl" + seed_metadata_file="${work_dir}/metadata.yaml" + seed_metadata_file_tmpl="${script_dir}/metadata.yaml.tmpl" + seed_network_config_file="${work_dir}/network_config" + seed_network_config_file_tmpl="${script_dir}/network_config.tmpl" +} + +function _set_ssh_public_key_var { + export MCC_SSH_PUBLIC_KEY + MCC_SSH_PUBLIC_KEY=$(cat "${SSH_PUBLIC_KEY_PATH}") +} + +function prepare_ssh_key { + if ! [ -f "${SSH_PRIVATE_KEY_PATH}" ]; then + ${ssh_keygen_bin} -t rsa -f "${SSH_PRIVATE_KEY_PATH}" -P "" + fi + chmod 600 "${SSH_PRIVATE_KEY_PATH}" + + if ! 
[ -f "${SSH_PUBLIC_KEY_PATH}" ]; then + ${ssh_keygen_bin} -f "${SSH_PRIVATE_KEY_PATH}" -y > "${SSH_PUBLIC_KEY_PATH}" + fi + + # Prepare seed VM userdata + _set_ssh_public_key_var + + # Generate password for seed node user + seed_node_pwd_file="${work_dir}/seed_node_password" + export SEED_NODE_PWD + SEED_NODE_PWD=$(LC_ALL=C tr -dc A-Za-z0-9 "${seed_node_pwd_file}" + echo "Password for user ${SEED_NODE_USER} is stored in ${seed_node_pwd_file}" + + _set_tmpl_file_vars + render_template < "${seed_userdata_file_tmpl}" > "${seed_userdata_file}" + + echo "SSH key has been prepared" +} + +function _set_vsphere_vm_vars { + seed_folder="${VSPHERE_FOLDER}/seed" + mgmt_folder="${VSPHERE_FOLDER}/management" + child_folder="${VSPHERE_FOLDER}/child" + + seed_base_name="mcc-seed" + mgmt_machine_name_prefix="mgmt-master" + child_control_machine_name_prefix="child-control" + child_worker_machine_name_prefix="child-worker" + export vm_name_prefix_tmpl="" + if [ -n "${VM_NAME_PREFIX}" ]; then + vm_name_prefix_tmpl="${VM_NAME_PREFIX}-" + seed_base_name="${VM_NAME_PREFIX}-${seed_base_name}" + mgmt_machine_name_prefix="${VM_NAME_PREFIX}-${mgmt_machine_name_prefix}" + child_control_machine_name_prefix="${VM_NAME_PREFIX}-${child_control_machine_name_prefix}" + child_worker_machine_name_prefix="${VM_NAME_PREFIX}-${child_worker_machine_name_prefix}" + fi + seed_full_name="${seed_folder}/${seed_base_name}" + +} + +function prepare_seed_node_metadata { + _set_tmpl_file_vars + local metadata userdata + export seed_mac_address network_lcm_mask encoded_network_config + + seed_mac_address="$(${GOVC_BIN} vm.info -json "${seed_full_name}" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + + network_lcm_mask="$(echo "${NETWORK_LCM_SUBNET}" | awk -F "/" '{print $2}')" + render_template < "${seed_network_config_file_tmpl}" > "${seed_network_config_file}" + + encoded_network_config=$(${base64_encode_cmd} < 
"${seed_network_config_file}") + render_template < "${seed_metadata_file_tmpl}" > "${seed_metadata_file}" + unset seed_mac_address network_lcm_mask encoded_network_config + + metadata="$(${base64_encode_cmd} < "${seed_metadata_file}")" + userdata="$(${base64_encode_cmd} < "${seed_userdata_file}")" + + if [ -n "${metadata}" ]; then + ${GOVC_BIN} vm.change -vm "${seed_full_name}" \ + -e guestinfo.metadata="${metadata}" \ + -e guestinfo.metadata.encoding="base64" + fi + + if [ -n "${userdata}" ]; then + ${GOVC_BIN} vm.change -vm "${seed_full_name}" \ + -e guestinfo.userdata="${userdata}" \ + -e guestinfo.userdata.encoding="base64" + fi +} + +function create_seed_vm { + _print_header "${FUNCNAME[0]}" + _set_vsphere_vm_vars + + ${GOVC_BIN} folder.info "${seed_folder}" || ${GOVC_BIN} folder.create "${seed_folder}" + + local vm_disk_name="${seed_base_name}/${seed_base_name}.vmdk" + + if [ -n "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}" ] && [ -z "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH}" ]; then + local do_upload="true" + local import_folder="mcc-seed-image" + local import_file_name + import_file_name="${import_folder}/$(basename "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}")" + + set +e + # Do not fail if file is not found + if ${GOVC_BIN} datastore.ls -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${import_file_name}"; then + echo "Skipping uploaded because ${import_file_name} is already in place" + do_upload="false" + else + echo "${import_file_name} is not found on datastore. Doing upload" + fi + set -e + + if [ "${do_upload}" == "true" ]; then + ${GOVC_BIN} import.vmdk \ + -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" \ + -folder="${seed_folder}" \ + -pool "${VSPHERE_RESOURCE_POOL}" \ + "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}" \ + "${import_folder}" + fi + + if ! 
${GOVC_BIN} datastore.ls -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${seed_base_name}"; then + ${GOVC_BIN} datastore.mkdir -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${seed_base_name}" + fi + ${GOVC_BIN} datastore.cp -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" \ + "${import_file_name}" \ + "${vm_disk_name}" + fi + + if [ -n "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH}" ]; then + if ! ${GOVC_BIN} datastore.ls -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${seed_base_name}"; then + ${GOVC_BIN} datastore.mkdir -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${seed_base_name}" + fi + + # Copy original disk to seed node disk + ${GOVC_BIN} datastore.cp -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" \ + "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH}" "${vm_disk_name}" + fi + + if [ -n "${VSPHERE_VMDK_IMAGE_DATASTORE_PATH}" ] || [ -n "${VSPHERE_VMDK_IMAGE_LOCAL_PATH}" ]; then + # Default disk size is 10GiB which is too small + ${GOVC_BIN} datastore.disk.extend -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" \ + -size="${SEED_NODE_DISK_SIZE}" "${vm_disk_name}" + + # Create seed node + ${GOVC_BIN} vm.create -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" \ + -pool="${VSPHERE_RESOURCE_POOL}" \ + -folder="${seed_folder}" \ + -net="${VSPHERE_NETWORK_LCM}" \ + -on=false \ + -m="${SEED_NODE_MEMORY_MB}" \ + -c="${SEED_NODE_CPU_NUM}" \ + -disk="${vm_disk_name}" \ + "${seed_base_name}" + + else # VSPHERE_VM_TEMPLATE + # Create seed node + ${GOVC_BIN} vm.clone -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" \ + -pool="${VSPHERE_RESOURCE_POOL}" \ + -vm="${VSPHERE_VM_TEMPLATE}" \ + -folder="${seed_folder}" \ + -net="${VSPHERE_NETWORK_LCM}" \ + -template=false \ + -on=false \ + -m="${SEED_NODE_MEMORY_MB}" \ + -c="${SEED_NODE_CPU_NUM}" \ + "${seed_base_name}" + fi + + prepare_seed_node_metadata + + ${GOVC_BIN} vm.power -on "${seed_full_name}" + + echo "Seed node VM has been created: IP ${NETWORK_LCM_SEED_IP}" +} + +function create_mgmt_cluster_vms { + _print_header "${FUNCNAME[0]}" + _set_vsphere_vm_vars + + ${GOVC_BIN} folder.info "${mgmt_folder}" || 
${GOVC_BIN} folder.create "${mgmt_folder}" + + # Create management cluster VMs + for (( num=0; num<3; num++ )); do + machine_name="${mgmt_machine_name_prefix}-$num" + ${GOVC_BIN} vm.create -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" \ + -pool="${VSPHERE_RESOURCE_POOL}" \ + -folder "${mgmt_folder}" \ + -net "${VSPHERE_NETWORK_LCM}" \ + -on=false \ + -m="${MGMT_MACHINES_MEMORY_MB}" \ + -c="${MGMT_MACHINES_CPU_NUM}" \ + -disk="${MGMT_MACHINES_DISK_SIZE}" \ + "${machine_name}" + + ${GOVC_BIN} vm.change -vm "${mgmt_folder}/${machine_name}" -e disk.EnableUUID=TRUE + done + + echo "Vsphere management cluster VMs have been created" +} + +function create_child_cluster_vms { + _print_header "${FUNCNAME[0]}" + _set_vsphere_vm_vars + + ${GOVC_BIN} folder.info "${child_folder}" || ${GOVC_BIN} folder.create "${child_folder}" + + local disk_id + # Create child cluster VMs for control plane + for (( num=0; num<3; num++ )); do + machine_name="${child_control_machine_name_prefix}-$num" + ${GOVC_BIN} vm.create -ds="${VSPHERE_DATASTORE_CHILD_CLUSTER}" \ + -pool="${VSPHERE_RESOURCE_POOL}" \ + -folder "${child_folder}" \ + -net "${VSPHERE_NETWORK_LCM}" \ + -on=false \ + -m="${CHILD_CONTROL_MACHINES_MEMORY_MB}" \ + -c="${CHILD_CONTROL_MACHINES_CPU_NUM}" \ + -disk="${CHILD_MACHINES_ROOT_DISK_SIZE}" \ + "${machine_name}" + + ${GOVC_BIN} vm.change -vm "${child_folder}/${machine_name}" -e disk.EnableUUID=TRUE + disk_id="$(${GOVC_BIN} disk.create -ds="${VSPHERE_DATASTORE_CHILD_CLUSTER}" \ + -size "${CHILD_MACHINES_CEPH_DISK_SIZE}" "${machine_name}-disk-2" | tail -n 1)" + ${GOVC_BIN} disk.attach -vm "${child_folder}/${machine_name}" -ds="${VSPHERE_DATASTORE_CHILD_CLUSTER}" "${disk_id}" + ${GOVC_BIN} vm.network.add -net "${VSPHERE_NETWORK_OPENSTACK}" -vm "${child_folder}/${machine_name}" + done + + # Create child cluster VMs for workers/computes + for (( num=0; num<3; num++ )); do + machine_name="${child_worker_machine_name_prefix}-$num" + ${GOVC_BIN} vm.create 
-ds="${VSPHERE_DATASTORE_CHILD_CLUSTER}" \ + -pool="${VSPHERE_RESOURCE_POOL}" \ + -folder "${child_folder}" \ + -net "${VSPHERE_NETWORK_LCM}" \ + -on=false \ + -m="${CHILD_WORKER_MACHINES_MEMORY_MB}" \ + -c="${CHILD_WORKER_MACHINES_CPU_NUM}" \ + -disk="${CHILD_MACHINES_ROOT_DISK_SIZE}" \ + "${machine_name}" + + ${GOVC_BIN} vm.change -vm "${child_folder}/${machine_name}" -e disk.EnableUUID=TRUE -nested-hv-enabled TRUE + disk_id="$(${GOVC_BIN} disk.create -ds="${VSPHERE_DATASTORE_CHILD_CLUSTER}" \ + -size "${CHILD_MACHINES_CEPH_DISK_SIZE}" "${machine_name}-disk-2" | tail -n 1)" + ${GOVC_BIN} disk.attach -vm "${child_folder}/${machine_name}" -ds="${VSPHERE_DATASTORE_CHILD_CLUSTER}" "${disk_id}" + ${GOVC_BIN} vm.network.add -net "${VSPHERE_NETWORK_OPENSTACK}" -vm "${child_folder}/${machine_name}" + done + + echo "Vsphere child cluster VMs have been created" +} + +function wait_for_seed_ssh_available { + local num_attepts=30 + while [ ${num_attepts} -ne 0 ]; do + echo "Trying ssh to seed node: ${NETWORK_LCM_SEED_IP}" + res=$(${ssh_bin} -o ConnectTimeout=5 -i "${SSH_PRIVATE_KEY_PATH}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}" echo ok || true) + if [ "${res}" == "ok" ]; then + return + fi + + num_attepts=$((num_attepts-1)) + if [ ${num_attepts} -eq 0 ]; then + echo "Error: timeout waiting for ssh to be available" + exit 1 + fi + sleep 5 + done +} + +function prepare_mgmt_cluster_templates { + _print_header "${FUNCNAME[0]}" + _set_vsphere_vm_vars + export mgmt_node_mac_address_0 mgmt_node_mac_address_1 mgmt_node_mac_address_2 + mgmt_node_mac_address_0="$(${GOVC_BIN} vm.info -json "${mgmt_folder}/${mgmt_machine_name_prefix}-0" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + mgmt_node_mac_address_1="$(${GOVC_BIN} vm.info -json "${mgmt_folder}/${mgmt_machine_name_prefix}-1" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") 
| .macAddress')" + mgmt_node_mac_address_2="$(${GOVC_BIN} vm.info -json "${mgmt_folder}/${mgmt_machine_name_prefix}-2" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + + if [ -z "${MCC_SERVICEUSER_PASSWORD}" ]; then + MCC_SERVICEUSER_PASSWORD=$(LC_ALL=C tr -dc A-Za-z0-9 "${mgmt_templates_work_dir}/${f_b_name}" + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${mgmt_templates_work_dir}/${f_b_name}" \ + "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:${mgmt_templates_remote_dir}" + done +} + +function prepare_child_cluster_templates { + _print_header "${FUNCNAME[0]}" + # shellcheck source=/dev/null + [ -f "${mcc_version_file}" ] && source "${mcc_version_file}" + if [ -z "${MCC_VERSION}" ]; then + echo "Error: MCC_VERSION is not set. Unable to prepare management cluster templates" + exit 1 + fi + + _set_vsphere_vm_vars + export child_control_mac_address_0 child_control_mac_address_1 child_control_mac_address_2 + child_control_mac_address_0="$(${GOVC_BIN} vm.info -json "${child_folder}/${child_control_machine_name_prefix}-0" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + child_control_mac_address_1="$(${GOVC_BIN} vm.info -json "${child_folder}/${child_control_machine_name_prefix}-1" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + child_control_mac_address_2="$(${GOVC_BIN} vm.info -json "${child_folder}/${child_control_machine_name_prefix}-2" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + + export child_worker_mac_address_0 child_worker_mac_address_1 child_worker_mac_address_2 + child_worker_mac_address_0="$(${GOVC_BIN} vm.info -json "${child_folder}/${child_worker_machine_name_prefix}-0" \ + | ${jq_bin} -r 
'.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + child_worker_mac_address_1="$(${GOVC_BIN} vm.info -json "${child_folder}/${child_worker_machine_name_prefix}-1" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + child_worker_mac_address_2="$(${GOVC_BIN} vm.info -json "${child_folder}/${child_worker_machine_name_prefix}-2" \ + | ${jq_bin} -r '.virtualMachines[0].config.hardware.device[] | select (.deviceInfo.label == "Network adapter 1") | .macAddress')" + + _set_bootstrap_vars + + if [ -z "${MCC_CHILD_CLUSTER_RELEASE}" ]; then + MCC_CHILD_CLUSTER_RELEASE="$(${ssh_cmd} "/home/${SEED_NODE_USER}/yq" \ + eval '.spec.supportedClusterReleases[0].name' \ + "/home/${SEED_NODE_USER}/kaas-bootstrap/releases/kaas/${MCC_VERSION}.yaml")" + fi + + _set_ssh_public_key_var + _set_templates_dir_vars + rm -rf "${child_templates_work_dir}" && mkdir -p "${child_templates_work_dir}" + ${ssh_cmd} mkdir -p "${child_templates_remote_dir}" + + local f_b_name + # shellcheck disable=SC2044 + for file in $(find "${child_templates_local_dir}" -maxdepth 1 -type f -name "*.template"); do + f_b_name=$(basename "${file}") + render_template < "${file}" > "${child_templates_work_dir}/${f_b_name%.tmpl}" + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${child_templates_work_dir}/${f_b_name%.tmpl}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:${child_templates_remote_dir}" + done + + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" -r "${child_templates_local_dir}/certs" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:${child_templates_remote_dir}/certs" + ${ssh_cmd} chmod +x "${child_templates_remote_dir}/certs/create_secrets.sh" + + cp -r "${child_templates_local_dir}/hack" "${child_templates_work_dir}/hack" +} + +function setup_seed { + local prepare_env_file="${work_dir}/.prepare_seed_node.env" + local remote_proxy_cert_file="" + if { [ -n "${HTTP_PROXY}" ] || [ -n 
"${HTTPS_PROXY}" ] ;} && [ -n "${PROXY_CA_CERTIFICATE_PATH}" ]; then + remote_proxy_cert_file="/home/${SEED_NODE_USER}/$(basename "${PROXY_CA_CERTIFICATE_PATH}")" + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${PROXY_CA_CERTIFICATE_PATH}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:${remote_proxy_cert_file}" + fi + + cat << EOF > "${prepare_env_file}" +#!/bin/bash +export HTTPS_PROXY="${HTTPS_PROXY}" +export HTTP_PROXY="${HTTP_PROXY}" +export NO_PROXY="${NO_PROXY}" +export PROXY_CA_CERTIFICATE_PATH="${remote_proxy_cert_file}" + +export MCC_CDN_REGION="${MCC_CDN_REGION}" +export MCC_CDN_BASE_URL="${MCC_CDN_BASE_URL}" +export MCC_RELEASES_URL="${MCC_RELEASES_URL}" +export SEED_NODE_USER="${SEED_NODE_USER}" + +export YQ_VERSION="${YQ_VERSION}" +EOF + + if [ -n "${MCC_VERSION}" ]; then + echo "export MCC_VERSION=${MCC_VERSION}" >> "${prepare_env_file}" + fi + + local ssh_cmd="${ssh_bin} -i ${SSH_PRIVATE_KEY_PATH} ${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}" + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${script_dir}/bin/prepare_seed_node.sh" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:" + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${prepare_env_file}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:" + ${ssh_cmd} chmod +x prepare_seed_node.sh + ${ssh_cmd} bash -x prepare_seed_node.sh + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${MCC_LICENSE_FILE}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:/home/${SEED_NODE_USER}/mirantis.lic" + if [ -z "${MCC_VERSION}" ]; then + export MCC_VERSION + MCC_VERSION="$(${ssh_cmd} cat mcc_version)" + fi + + # Keep state: MCC_VERSION + echo "export MCC_VERSION=${MCC_VERSION}" > "${mcc_version_file}" +} + +function setup_bootstrap_cluster { + _print_header "${FUNCNAME[0]}" + # shellcheck source=/dev/null + [ -f "${mcc_version_file}" ] && source "${mcc_version_file}" + + local ssh_cmd="${ssh_bin} -i ${SSH_PRIVATE_KEY_PATH} ${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}" + local bootstrap_env_file_name="bootstrap.env" + local 
bootstrap_env_file="${work_dir}/${bootstrap_env_file_name}" + local remote_proxy_cert_file="" + local network_pxe_mask + network_pxe_mask="$(echo "${NETWORK_PXE_SUBNET}" | awk -F "/" '{print $2}')" + if { [ -n "${HTTP_PROXY}" ] || [ -n "${HTTPS_PROXY}" ] ;} && [ -n "${PROXY_CA_CERTIFICATE_PATH}" ]; then + remote_proxy_cert_file="/home/${SEED_NODE_USER}/$(basename "${PROXY_CA_CERTIFICATE_PATH}")" + fi + + # Note: bootstrap script requires KAAS_CDN_REGION, not MCC_CDN_REGION + cat << EOF > "${bootstrap_env_file}" +export KAAS_RELEASE_YAML="/home/${SEED_NODE_USER}/kaas-bootstrap/releases/kaas/${MCC_VERSION}.yaml" +export CLUSTER_RELEASES_DIR="/home/${SEED_NODE_USER}/kaas-bootstrap/releases/cluster" +export KAAS_CDN_REGION="${MCC_CDN_REGION}" + +export HTTPS_PROXY=${HTTPS_PROXY} +export HTTP_PROXY="${HTTP_PROXY}" +export NO_PROXY="${NO_PROXY}" +export PROXY_CA_CERTIFICATE_PATH="${remote_proxy_cert_file}" + +export KAAS_BM_ENABLED="true" +export KAAS_BM_PXE_BRIDGE=${SEED_NODE_PXE_BRIDGE} +export KAAS_BM_PXE_IP=${NETWORK_PXE_BRIDGE_IP} +export KAAS_BM_PXE_MASK=${network_pxe_mask} +export KAAS_BOOTSTRAP_DEBUG="${MCC_DEMO_DEBUG}" +EOF + + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${bootstrap_env_file}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:/home/${SEED_NODE_USER}/kaas-bootstrap/${bootstrap_env_file_name}" + + ${ssh_cmd} "/home/${SEED_NODE_USER}/kaas-bootstrap/bootstrap.sh" bootstrapv2 +} + +function _set_bootstrap_vars { + ssh_cmd="${ssh_bin} -i ${SSH_PRIVATE_KEY_PATH} ${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}" + kubectl_file_var="KUBECONFIG=/home/${SEED_NODE_USER}/.kube/kind-config-clusterapi" + remote_kubectl_cmd="${ssh_cmd} ${kubectl_file_var} /home/${SEED_NODE_USER}/kaas-bootstrap/bin/kubectl" + remote_container_cloud_cmd="${ssh_cmd} ${kubectl_file_var} /home/${SEED_NODE_USER}/kaas-bootstrap/container-cloud" + remote_kind_cmd="${ssh_cmd} /home/${SEED_NODE_USER}/kaas-bootstrap/bin/kind" +} + +function _set_mgmt_vars { + ssh_cmd="${ssh_bin} -i ${SSH_PRIVATE_KEY_PATH} 
${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}" + kubectl_file_var="KUBECONFIG=/home/${SEED_NODE_USER}/kaas-bootstrap/kubeconfig-${MCC_MGMT_CLUSTER_NAME}" + remote_kubectl_cmd="${ssh_cmd} ${kubectl_file_var} /home/${SEED_NODE_USER}/kaas-bootstrap/bin/kubectl" + remote_container_cloud_cmd="${ssh_cmd} ${kubectl_file_var} /home/${SEED_NODE_USER}/kaas-bootstrap/container-cloud" +} + +function _set_child_vars { + ssh_cmd="${ssh_bin} -i ${SSH_PRIVATE_KEY_PATH} ${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}" + kubectl_file_var="KUBECONFIG=/home/${SEED_NODE_USER}/kaas-bootstrap/kubeconfig-${MCC_CHILD_CLUSTER_NAME}" + remote_kubectl_cmd="${ssh_cmd} ${kubectl_file_var} /home/${SEED_NODE_USER}/kaas-bootstrap/bin/kubectl" + remote_container_cloud_cmd="${ssh_cmd} ${kubectl_file_var} /home/${SEED_NODE_USER}/kaas-bootstrap/container-cloud" +} + +function _set_templates_dir_vars { + # shellcheck source=/dev/null + [ -f "${mcc_version_file}" ] && source "${mcc_version_file}" + if [ -z "${MCC_VERSION}" ]; then + echo "Error: MCC_VERSION is not set. 
Unable to set templates vars" + exit 1 + fi + + mgmt_templates_work_dir="${work_dir}/templates/management" + mgmt_templates_local_dir="${script_dir}/templates/${MCC_VERSION%-rc}/management/" + mgmt_templates_remote_dir="/home/${SEED_NODE_USER}/kaas-bootstrap/templates/bm" + + child_templates_work_dir="${work_dir}/templates/child/" + child_templates_local_dir="${script_dir}/templates/${MCC_VERSION%-rc}/child/" + child_templates_remote_dir="/home/${SEED_NODE_USER}/kaas-bootstrap/templates/bm/child" +} + +function deploy_mgmt_cluster { + _print_header "${FUNCNAME[0]}" + _set_bootstrap_vars + _set_templates_dir_vars + + echo "Creating management cluster objects" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/bootstrapregion.yaml.template" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/serviceusers.yaml.template" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/sshkey.yaml.template" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/cluster.yaml.template" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/metallbconfig.yaml.template" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/ipam-objects.yaml.template" + + # wait for VBMC crd + echo "Waiting for vbmcs crd" + _wait_for_object_status "crds" "vbmcs.metal3.io" "" ".status.conditions[].status" "True" 15 "plain" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/vbmc.yaml.template" + + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/baremetalhostprofiles.yaml.template" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/baremetalhosts.yaml.template" + ${remote_kubectl_cmd} apply -f "${mgmt_templates_remote_dir}/machines.yaml.template" + + # wait for ironic start, so provisioning artifacts are downloaded + echo "Waiting for Ironic deployment" + _wait_for_object_status "deployment" "ironic" "kaas" ".status.readyReplicas" "1" "${IRONIC_DEPLOYMENT_TIMEOUT}" "plain" + + # wait for bmh + echo "Waiting for 
Baremetal hosts provisioning" + local bmh_names + bmh_names=$(${remote_kubectl_cmd} get bmh -o jsonpath='{.items[*].metadata.name}') + _wait_for_objects_statuses "bmh" "${bmh_names}" "" ".status.provisioning.state" "available,provisioned" "${BMH_READINESS_TIMEOUT}" + + # start deployment + echo "Starting MCC management cluster deployment" + ${remote_container_cloud_cmd} bootstrap approve all + + wait_for_mgmt_cluster + + cleanup_bootstrap_cluster +} + +function apply_coredns_hack { + _print_header "${FUNCNAME[0]}" + _set_child_vars + _set_templates_dir_vars + # Patch Coredns configmap + local hack_dir="${child_templates_work_dir}/hack" + local cm_file="${hack_dir}/coredns.cm.yaml" + local remote_cm_file="${child_templates_remote_dir}/coredns.cm.yaml" + local cm_content_file="${hack_dir}/coredns.cm.tmp" + + export cm_hack_value coredns_cm_content + # Get current config and insert hack for hosts substitution + ${remote_kubectl_cmd} -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}' \ + | sed 's/^/ /' | awk 'NR==2{print "\{\{ cm_hack_value \}\}"}1' > "${cm_content_file}" + # Update hosts + cm_hack_value="$(render_template < "${hack_dir}/coredns.cm.hosts")" + # Update configmap content value + coredns_cm_content="$(render_template < "${cm_content_file}")" + # Update configmap template + render_template < "${hack_dir}/coredns.cm.template" > "${cm_file}" + # Copy template to seed node and apply + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${cm_file}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:${remote_cm_file}" + ${remote_kubectl_cmd} apply -f "${remote_cm_file}" + unset cm_hack_value coredns_cm_content + + # Note: CoreDNS config is reloaded automatically +} + +function deploy_child_cluster { + _print_header "${FUNCNAME[0]}" + _set_mgmt_vars + _set_templates_dir_vars + + echo "Creating child cluster objects" + ${remote_kubectl_cmd} get namespace "${MCC_CHILD_CLUSTER_NAMESPACE}" || \ + ${remote_kubectl_cmd} create namespace "${MCC_CHILD_CLUSTER_NAMESPACE}" + 
${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/sshkey.yaml.template" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/cluster.yaml.template" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/metallbconfig.yaml.template" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/ipam-objects.yaml.template" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/baremetalhostprofiles.yaml.template" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/baremetalhosts.yaml.template" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/machines.yaml.template" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/kaascephcluster.yaml.template" + + echo "MCC child cluster deployment has been started" + + # wait for bmh + echo "Waiting for Baremetal hosts provisioning" + local bmh_names + bmh_names=$(${remote_kubectl_cmd} -n "${MCC_CHILD_CLUSTER_NAMESPACE}" get bmh -o jsonpath='{.items[*].metadata.name}') + _wait_for_objects_statuses "bmh" "${bmh_names}" "${MCC_CHILD_CLUSTER_NAMESPACE}" ".status.provisioning.state" "available,provisioned" "${BMH_READINESS_TIMEOUT}" + + echo "Waiting for child cluster deployment" + wait_for_child_cluster + + echo "Waiting for Ceph" + _wait_for_object_status kaascephcluster "ceph-${MCC_CHILD_CLUSTER_NAME}" "${MCC_CHILD_CLUSTER_NAMESPACE}" ".status.shortClusterInfo.state" \ + "Ready" "${CHILD_CEPH_CLUSTER_TIMEOUT}" "plain" + echo "Ceph cluster is ready" + + echo "Child cluster deployment has been finished successfully" +} + +function deploy_openstack { + _print_header "${FUNCNAME[0]}" + _set_templates_dir_vars + _set_child_vars + + ${ssh_cmd} "KUBECTL_BIN=/home/${SEED_NODE_USER}/kaas-bootstrap/bin/kubectl \ + ${kubectl_file_var}" "${child_templates_remote_dir}/certs/create_secrets.sh" + ${remote_kubectl_cmd} apply -f "${child_templates_remote_dir}/osdpl.yaml.template" + + echo "Waiting for Openstack" + _wait_for_object_status openstackdeploymentstatus 
osh-dev openstack ".status.osdpl.state" "APPLIED" "${OSDPL_APPLIED_TIMEOUT}" "plain" + # Wait till all the Openstack components will be ready + _wait_for_object_status openstackdeploymentstatus osh-dev openstack ".status.health.*.*.status" '^Ready( Ready)*$' "${OPENSTACK_READINESS_TIMEOUT}" "regex" + echo "Openstack Deployment has been completed" + + # Note: custom Openstack hostnames have to be resolved inside child cluster, + # otherwise the Openstack endpoints are not accessible. + # If user adds Openstack endpoints to the DNS, the hack is not needed + if [[ "${APPLY_COREDNS_HACK}" =~ [Tt]rue ]]; then + apply_coredns_hack + fi + + local c_y_file="${work_dir}/cloud.yaml" + ${remote_kubectl_cmd} -n openstack-external get secrets openstack-identity-credentials \ + -o jsonpath='{.data.clouds\\.yaml}' | base64 -d > "${c_y_file}" + + echo "Openstack deployment has been finished successfully" + echo "Please add following line to your /etc/hosts configuration to access Openstack Web UI" + echo "${NETWORK_LCM_METALLB_OPENSTACK_ADDRESS} \ + keystone.${MCC_OPENSTACK_PUBLIC_DOMAIN} \ + horizon.${MCC_OPENSTACK_PUBLIC_DOMAIN} \ + nova.${MCC_OPENSTACK_PUBLIC_DOMAIN} \ + novncproxy.${MCC_OPENSTACK_PUBLIC_DOMAIN}" + echo "Openstack Web UI: https://horizon.${MCC_OPENSTACK_PUBLIC_DOMAIN}" + echo "Openstack credentials are saved into ${c_y_file} on local machine" +} + +function wait_for_mgmt_cluster { + _set_bootstrap_vars + # wait for cluster readiness + _wait_for_object_status cluster "${MCC_MGMT_CLUSTER_NAME}" "" ".status.providerStatus.ready" "true"\ + "${MGMT_CLUSTER_READINESS_TIMEOUT}" "plain" + + local k_f_name_local="${work_dir}/kubeconfig-${MCC_MGMT_CLUSTER_NAME}" + local k_f_name_remote="/home/${SEED_NODE_USER}/kaas-bootstrap/kubeconfig-${MCC_MGMT_CLUSTER_NAME}" + ${remote_container_cloud_cmd} get cluster-kubeconfig \ + --cluster-name="${MCC_MGMT_CLUSTER_NAME}" --kubeconfig-output="${k_f_name_remote}" + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" 
"${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:${k_f_name_remote}" "${k_f_name_local}" + + echo "Management cluster kubeconfig is saved locally to ${k_f_name_local}" +} + +function wait_for_child_cluster { + _set_mgmt_vars + _wait_for_object_status cluster "${MCC_CHILD_CLUSTER_NAME}" "${MCC_CHILD_CLUSTER_NAMESPACE}" ".status.providerStatus.ready" "true" \ + "${CHILD_CLUSTER_READINESS_TIMEOUT}" "plain" + + local k_f_name_local="${work_dir}/kubeconfig-${MCC_CHILD_CLUSTER_NAME}" + local k_f_name_remote="/home/${SEED_NODE_USER}/kaas-bootstrap/kubeconfig-${MCC_CHILD_CLUSTER_NAME}" + ${remote_kubectl_cmd} -n "${MCC_CHILD_CLUSTER_NAMESPACE}" get secret "${MCC_CHILD_CLUSTER_NAME}-kubeconfig" \ + -o jsonpath='{.data.admin\\.conf}' | base64 -d | tee "${k_f_name_local}" + ${scp_bin} -i "${SSH_PRIVATE_KEY_PATH}" "${k_f_name_local}" "${SEED_NODE_USER}@${NETWORK_LCM_SEED_IP}:${k_f_name_remote}" + + echo "Child cluster kubeconfig is saved locally to ${k_f_name_local}" +} + +function _wait_for_objects_statuses { + if [ $# -ne 6 ]; then + echo "Error: _wait_for_objects_statuses requires exactly 6 arguments" + exit 1 + fi + + local obj_type="${1}" + local obj_names="${2}" + local obj_namespace="${3}" + if [ -n "${obj_namespace}" ]; then + obj_namespace="-n ${obj_namespace}" + fi + local obj_status_path="${4}" + # Delimited by , + local obj_expected_statuses_pattern + obj_expected_statuses_pattern="^($(echo "${5}" | tr "," "|"))$" + local num_attepts=${6} + + while [ "${num_attepts}" -ne 0 ]; do + local all_ready=true + for obj_name in ${obj_names}; do + set +e + status="$(${remote_kubectl_cmd} "${obj_namespace}" get "${obj_type}" "${obj_name}" -o jsonpath="{${obj_status_path}}")" + set -e + echo "${obj_type} ${obj_name} status: ${status}. Expected status: ${obj_expected_statuses_pattern}" + if [[ ! 
"${status}" =~ ${obj_expected_statuses_pattern} ]]; then + all_ready=false + fi + done + + if [ "${all_ready}" == "true" ]; then + echo "All ${obj_type}s are ready" + break + fi + + num_attepts=$((num_attepts-1)) + echo "Left attempts: ${num_attepts}" + if [ ${num_attepts} -eq 0 ]; then + echo "Error: timeout waiting for ${obj_type}s to be available" + exit 1 + fi + sleep 60 + done +} + +function _wait_for_object_status { + if ! [ $# -eq 7 ]; then + echo "Error: _wait_for_object_status requires exactly 7 arguments" + exit 1 + fi + + local obj_type="${1}" + local obj_name="${2}" + local obj_namespace="${3}" + if [ -n "${obj_namespace}" ]; then + obj_namespace="-n ${obj_namespace}" + fi + local obj_status_path="${4}" + local obj_expected_status="${5}" + local num_attepts=${6} + local compare_mode=${7} + + while [ "${num_attepts}" -ne 0 ]; do + set +e + status="$(${remote_kubectl_cmd} "${obj_namespace}" get "${obj_type}" "${obj_name}" -o jsonpath="{${obj_status_path}}")" + set -e + echo "${obj_type} ${obj_name} status: ${status}. Expected status: ${obj_expected_status}" + if [ "${compare_mode}" == 'regex' ]; then + if [[ "${status}" =~ ${obj_expected_status} ]]; then + break + fi + else + if [ "${status}" == "${obj_expected_status}" ]; then + break + fi + fi + + num_attepts=$((num_attepts-1)) + echo "Left attempts: ${num_attepts}" + if [ "${num_attepts}" -eq 0 ]; then + echo "Error: timeout waiting for ${obj_type} ${obj_name} status" + exit 1 + fi + sleep 60 + done + + echo "${obj_type} ${obj_name} is ready" +} + +function cleanup { + echo "Starting cleanup" + if ! [ -f "${GOVC_BIN}" ]; then + echo "Error: govc binary is not found. 
Cleanup is not possible" + exit 1 + fi + _set_vsphere_vm_vars + + local mgmt_cluster_vms child_cluster_vms seed_vm + mgmt_cluster_vms=$(${GOVC_BIN} ls "${mgmt_folder}") + child_cluster_vms=$(${GOVC_BIN} ls "${child_folder}") + seed_vm=$(${GOVC_BIN} ls "${seed_folder}") + + # shellcheck disable=SC2116 + for vm in $(echo "${mgmt_cluster_vms}" "${child_cluster_vms}" "${seed_vm}"); do + ${GOVC_BIN} vm.power -off -force "${vm}" + # Note: all disks are deleted with VM automatically + ${GOVC_BIN} vm.destroy "${vm}" + done + + local seed_disk_name="${seed_base_name}/${seed_base_name}.vmdk" + # Ensure seed disk is removed + if ${GOVC_BIN} datastore.ls -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${seed_disk_name}"; then + ${GOVC_BIN} datastore.rm -ds="${VSPHERE_DATASTORE_MGMT_CLUSTER}" "${seed_disk_name}" + fi + + rm -rf "${work_dir}" + + echo "Cleanup has been finished successfully" +} + +function cleanup_bootstrap_cluster { + _set_bootstrap_vars + ${remote_kind_cmd} delete cluster --name clusterapi +} + +function main { + local arg + if [ $# -ne 0 ]; then + arg="${1}" + shift + fi + + if [ -f "${ENV_FILE}" ]; then + # shellcheck source=/dev/null + chmod +x "${ENV_FILE}" && source "${ENV_FILE}" + fi + + if [[ "${MCC_DEMO_DEBUG}" =~ [Tt]rue ]]; then + set -x + fi + + case "${arg}" in + -h|help) + usage + exit 0 + ;; + cleanup) + verify_binaries + set_vars + ensure_govc_lib + verify_vsphere_objects + cleanup + exit 0 + ;; + cleanup_bootstrap_cluster) + verify_binaries + set_vars + cleanup_bootstrap_cluster + exit 0 + ;; + create_seed_vm) + verify_binaries + set_vars + ensure_govc_lib + verify_vsphere_objects + verify_mcc_vars + prepare_ssh_key + create_seed_vm + wait_for_seed_ssh_available + setup_seed + exit 0 + ;; + setup_bootstrap_cluster) + verify_binaries + set_vars + verify_mcc_vars + setup_bootstrap_cluster + exit 0 + ;; + create_mgmt_cluster_vms) + verify_binaries + set_vars + if [[ "${SKIP_VSPHERE_VMS_CREATION}" =~ [Tt]rue ]]; then + echo "Skipping 
create_mgmt_cluster_vms action: SKIP_VSPHERE_VMS_CREATION=True" + exit 0 + fi + ensure_govc_lib + verify_vsphere_objects + verify_mcc_vars + create_mgmt_cluster_vms + exit 0 + ;; + create_child_cluster_vms) + verify_binaries + set_vars + if [[ "${SKIP_VSPHERE_VMS_CREATION}" =~ [Tt]rue ]]; then + echo "Skipping create_child_cluster_vms action: SKIP_VSPHERE_VMS_CREATION=True" + exit 0 + fi + ensure_govc_lib + verify_vsphere_objects + verify_mcc_vars + create_child_cluster_vms + exit 0 + ;; + prepare_mgmt_cluster_templates) + verify_binaries + set_vars + ensure_govc_lib + prepare_mgmt_cluster_templates + exit 0 + ;; + prepare_child_cluster_templates) + verify_binaries + set_vars + ensure_govc_lib + prepare_child_cluster_templates + exit 0 + ;; + deploy_mgmt_cluster) + verify_binaries + set_vars + deploy_mgmt_cluster + exit 0 + ;; + deploy_child_cluster) + verify_binaries + set_vars + deploy_child_cluster + exit 0 + ;; + deploy_openstack) + verify_binaries + set_vars + deploy_openstack + exit 0 + ;; + apply_coredns_hack) + set_vars + apply_coredns_hack + exit 0 + ;; + all) + verify_binaries + set_vars + ensure_govc_lib + verify_vsphere_objects + + verify_mcc_vars + + prepare_ssh_key + + if ! [[ "${SKIP_VSPHERE_VMS_CREATION}" =~ [Tt]rue ]]; then + if ! [[ "${SKIP_SEED_NODE_CREATION}" =~ [Tt]rue ]]; then + create_seed_vm + fi + create_mgmt_cluster_vms + create_child_cluster_vms + fi + + wait_for_seed_ssh_available + + if ! 
[[ "${SKIP_SEED_NODE_SETUP}" =~ [Tt]rue ]]; then + setup_seed + fi + + prepare_mgmt_cluster_templates + + setup_bootstrap_cluster + + prepare_child_cluster_templates + + deploy_mgmt_cluster + + deploy_child_cluster + + deploy_openstack + + echo "MCC installation has been finished successfully" + exit 0 + ;; + *) + echo "Wrong option is passed" + usage + exit 1 + ;; + esac +} + +main "$@" diff --git a/metadata.yaml.tmpl b/metadata.yaml.tmpl new file mode 100644 index 0000000..15d1fb0 --- /dev/null +++ b/metadata.yaml.tmpl @@ -0,0 +1,2 @@ +network: {{ encoded_network_config }} +network.encoding: "base64" \ No newline at end of file diff --git a/network_config.tmpl b/network_config.tmpl new file mode 100644 index 0000000..f3a8991 --- /dev/null +++ b/network_config.tmpl @@ -0,0 +1,24 @@ +version: 2 +ethernets: + id0: + dhcp4: false + dhcp6: false + match: + macaddress: {{ seed_mac_address }} +bridges: + {{ SEED_NODE_PXE_BRIDGE }}: + dhcp4: false + dhcp6: false + interfaces: + - id0 + parameters: + forward-delay: 4 + stp: false + addresses: + - {{ NETWORK_LCM_SEED_IP }}/{{ network_lcm_mask }} + gateway4: {{ NETWORK_LCM_GATEWAY }} + nameservers: + addresses: + {%- for server in NAMESERVERS.split(',') %} + - {{ server -}} + {% endfor %} diff --git a/templates/2.28.0/child/baremetalhostprofiles.yaml.template b/templates/2.28.0/child/baremetalhostprofiles.yaml.template new file mode 100644 index 0000000..22e0e10 --- /dev/null +++ b/templates/2.28.0/child/baremetalhostprofiles.yaml.template @@ -0,0 +1,89 @@ +apiVersion: metal3.io/v1alpha1 +metadata: + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + name: default-2disks + labels: + kaas.mirantis.com/defaultBMHProfile: 'true' +kind: BareMetalHostProfile +spec: + devices: + - device: + minSize: 70Gi + wipeDevice: + eraseMetadata: + enabled: true + partitions: + - name: bios_grub + size: 4Mi + partflags: ['bios_grub'] + - name: uefi + partflags: ['esp'] + size: 200Mi + - name: config-2 + # Size of this partition is limited to 
64Mb. + size: 64Mi + - name: lvm_root_part + size: 50Gi + - name: lvm_lvp_part + size: 0 + - device: + minSize: 30Gi + wipeDevice: + eraseMetadata: + enabled: true + volumeGroups: + - name: lvm_root + devices: + - partition: lvm_root_part + - name: lvm_lvp + devices: + - partition: lvm_lvp_part + logicalVolumes: + - name: root + vg: lvm_root + size: 0 + - name: lvp + vg: lvm_lvp + size: 0 + fileSystems: + - fileSystem: vfat + partition: config-2 + - fileSystem: vfat + partition: uefi + mountPoint: /boot/efi + - fileSystem: ext4 + logicalVolume: root + mountPoint: / + mountOpts: 'rw,noatime,nodiratime,lazytime,nobarrier,commit=240,data=ordered' + - fileSystem: ext4 + logicalVolume: lvp + mountPoint: /mnt/local-volumes/ + mountOpts: 'rw,noatime,nodiratime,lazytime,nobarrier,commit=240,data=ordered' + preDeployScript: | + #!/bin/bash -ex + echo $(date) 'pre_deploy_script done' >> /root/pre_deploy_done + postDeployScript: | + #!/bin/bash -ex + echo "root:r00tme" | sudo chpasswd + echo "blacklist acpi_power_meter" > /etc/modprobe.d/hwmon.conf + ln -sf /dev/null /etc/systemd/system/ondemand.service + echo $(date) 'post_deploy_script done' >> /root/post_deploy_done + grubConfig: + defaultGrubOptions: + - 'GRUB_DISABLE_RECOVERY="true"' + - 'GRUB_PRELOAD_MODULES=lvm' + - 'GRUB_TIMEOUT=20' + - 'GRUB_TERMINAL_INPUT="console serial"' + - 'GRUB_TERMINAL_OUTPUT="gfxterm serial"' + - 'GRUB_SERIAL_COMMAND="serial --unit=0 --speed=9600"' + - 'GRUB_CMDLINE_LINUX="noibrs noibpb nopti nospectre_v2 nospectre_v1 l1tf=off nospec_store_bypass_disable no_stf_barrier mds=off tsx=on tsx_async_abort=off mitigations=off systemd.journald.forward_to_console=yes console=ttyS0,9600 console=tty0"' + kernelParameters: + sysctl: + kernel.dmesg_restrict: "1" + kernel.core_uses_pid: "1" + fs.file-max: "9223372036854775807" + fs.aio-max-nr: "1048576" + fs.inotify.max_user_instances: "4096" + vm.max_map_count: "262144" + net.ipv4.conf.all.rp_filter: "0" + net.ipv4.conf.default.rp_filter: "0" diff --git 
a/templates/2.28.0/child/baremetalhosts.yaml.template b/templates/2.28.0/child/baremetalhosts.yaml.template new file mode 100644 index 0000000..843965a --- /dev/null +++ b/templates/2.28.0/child/baremetalhosts.yaml.template @@ -0,0 +1,180 @@ +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: child-control-0-bmc-credentials + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: child-control-1-bmc-credentials + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: child-control-2-bmc-credentials + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: child-worker-0-bmc-credentials + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: child-worker-1-bmc-credentials + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: child-worker-2-bmc-credentials + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost 
+metadata: + name: child-control-0 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-child-control-0 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "child-control-0-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ child_control_mac_address_0 }}" + bmc: + address: 127.0.0.1:6240 + credentialsName: '' +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: child-control-1 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-child-control-1 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "child-control-1-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ child_control_mac_address_1 }}" + bmc: + address: 127.0.0.1:6241 + credentialsName: '' +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: child-control-2 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-child-control-2 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "child-control-2-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ child_control_mac_address_2 }}" + bmc: + address: 127.0.0.1:6242 + credentialsName: '' +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: child-worker-0 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-child-worker-0 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "child-worker-0-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ child_worker_mac_address_0 }}" + bmc: + address: 127.0.0.1:6250 + credentialsName: '' +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: child-worker-1 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + 
kaas.mirantis.com/provider: baremetal + baremetal: hw-child-worker-1 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "child-worker-1-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ child_worker_mac_address_1 }}" + bmc: + address: 127.0.0.1:6251 + credentialsName: '' +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: child-worker-2 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-child-worker-2 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "child-worker-2-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ child_worker_mac_address_2 }}" + bmc: + address: 127.0.0.1:6252 + credentialsName: '' diff --git a/templates/2.28.0/child/certs/ca-config.json b/templates/2.28.0/child/certs/ca-config.json new file mode 100644 index 0000000..dcf3f28 --- /dev/null +++ b/templates/2.28.0/child/certs/ca-config.json @@ -0,0 +1,18 @@ +{ + "signing": { + "default": { + "expiry": "8760h" + }, + "profiles": { + "kubernetes": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "8760h" + } + } + } +} diff --git a/templates/2.28.0/child/certs/ca-csr.json b/templates/2.28.0/child/certs/ca-csr.json new file mode 100644 index 0000000..c6b7c23 --- /dev/null +++ b/templates/2.28.0/child/certs/ca-csr.json @@ -0,0 +1,14 @@ +{ + "CN": "kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names":[{ + "C": "", + "ST": "", + "L": "", + "O": "", + "OU": "" + }] +} diff --git a/templates/2.28.0/child/certs/create_secrets.sh b/templates/2.28.0/child/certs/create_secrets.sh new file mode 100755 index 0000000..a7fa221 --- /dev/null +++ b/templates/2.28.0/child/certs/create_secrets.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +set -eou pipefail + +KUBECONFIG="${KUBECONFIG:=""}" +KUBECTL_BIN="${KUBECTL_BIN:="/home/mcc-user/kaas-bootstrap/bin/kubectl"}" + +if [ 
-z "${KUBECONFIG}" ]; then + echo "Error: KUBECONFIG must be provided" + exit 1 +fi + +script_dir="$(dirname "${BASH_SOURCE[0]}")" +pushd "${script_dir}" || true + +cfssl gencert -initca ca-csr.json | cfssljson -bare ca + +cfssl gencert -ca=ca.pem \ + -ca-key=ca-key.pem \ + --config=ca-config.json \ + -profile=kubernetes server-csr.json | cfssljson -bare server + +${KUBECTL_BIN} -n openstack create secret generic osh-dev-hidden \ + --from-file=ca_cert=ca.pem \ + --from-file=api_cert=server.pem \ + --from-file=api_key=server-key.pem +${KUBECTL_BIN} -n openstack label secret osh-dev-hidden "openstack.lcm.mirantis.com/osdpl_secret=true" + +echo "Openstack certificates have been created successfully" diff --git a/templates/2.28.0/child/certs/server-csr.json b/templates/2.28.0/child/certs/server-csr.json new file mode 100644 index 0000000..868f9ed --- /dev/null +++ b/templates/2.28.0/child/certs/server-csr.json @@ -0,0 +1,15 @@ +{ + "CN": "*.it.just.works", + "hosts": [ + "*.it.just.works" + ], + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { + "C": "US", + "L": "San Francisco", + "ST": "CA" + }] +} diff --git a/templates/2.28.0/child/cluster.yaml.template b/templates/2.28.0/child/cluster.yaml.template new file mode 100644 index 0000000..82b4172 --- /dev/null +++ b/templates/2.28.0/child/cluster.yaml.template @@ -0,0 +1,40 @@ +apiVersion: cluster.k8s.io/v1alpha1 +kind: Cluster +metadata: + labels: + kaas.mirantis.com/provider: baremetal + name: {{ MCC_CHILD_CLUSTER_NAME }} + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.232.0.0/18 + providerSpec: + value: + apiVersion: baremetal.k8s.io/v1alpha1 + kind: BaremetalClusterProviderSpec + dedicatedControlPlane: false + dedicatedMetallbPools: false + helmReleases: + - name: ceph-controller + values: {} + - name: stacklight + values: + highAvailabilityEnabled: false + logging: + enabled: false + prometheusServer: + 
customAlerts: [] + persistentVolumeClaimSize: 16Gi + retentionSize: 15GB + retentionTime: 15d + watchDogAlertEnabled: false + - name: metallb + values: {} + publicKeys: + - name: user-key + release: {{ MCC_CHILD_CLUSTER_RELEASE }} diff --git a/templates/2.28.0/child/hack/coredns.cm.hosts b/templates/2.28.0/child/hack/coredns.cm.hosts new file mode 100644 index 0000000..d2783e4 --- /dev/null +++ b/templates/2.28.0/child/hack/coredns.cm.hosts @@ -0,0 +1,16 @@ + hosts /etc/coredns/{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} {{ MCC_OPENSTACK_PUBLIC_DOMAIN }} { + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} barbican.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} cinder.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} cloudformation.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} designate.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} glance.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} heat.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} horizon.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} keystone.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} metadata.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} neutron.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} nova.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} novncproxy.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} octavia.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }} placement.{{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + } \ No newline at end of file diff --git a/templates/2.28.0/child/hack/coredns.cm.template b/templates/2.28.0/child/hack/coredns.cm.template new file mode 100644 index 0000000..eb2d686 --- /dev/null +++ 
b/templates/2.28.0/child/hack/coredns.cm.template @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | +{{ coredns_cm_content }} diff --git a/templates/2.28.0/child/ipam-objects.yaml.template b/templates/2.28.0/child/ipam-objects.yaml.template new file mode 100644 index 0000000..1f495e7 --- /dev/null +++ b/templates/2.28.0/child/ipam-objects.yaml.template @@ -0,0 +1,82 @@ + +--- +apiVersion: "ipam.mirantis.com/v1alpha1" +kind: Subnet +metadata: + name: child-k8s-api-lb + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_CHILD_CLUSTER_NAME }} + ipam/SVC-LBhost: "presents" +spec: + cidr: {{ NETWORK_LCM_CHILD_LB_HOST }}/32 + useWholeCidr: true + +--- +apiVersion: "ipam.mirantis.com/v1alpha1" +kind: Subnet +metadata: + name: child-lcm + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_CHILD_CLUSTER_NAME }} + ipam/SVC-k8s-lcm: "presents" +spec: + cidr: {{ NETWORK_LCM_SUBNET }} + gateway: {{ NETWORK_LCM_GATEWAY }} + nameservers: + {%- for server in NAMESERVERS.split(',') %} + - {{ server -}} + {% endfor %} + includeRanges: + - {{ NETWORK_LCM_STATIC_RANGE_CHILD }} + +--- +apiVersion: ipam.mirantis.com/v1alpha1 +kind: L2Template +metadata: + name: default-child + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_CHILD_CLUSTER_NAME }} + ipam/DefaultForCluster: "1" +spec: + autoIfMappingPrio: + - eth + - eno + - ens + - enp + l3Layout: + - scope: namespace + subnetName: child-lcm + labelSelector: + kaas.mirantis.com/provider: baremetal + ipam/SVC-k8s-lcm: "presents" +{#- protect go-template below from Jinja #} +{%- raw %} + npTemplate: | + version: 2 + renderer: networkd + ethernets: + {{ nic 0 }}: + addresses: + - {{ ip "0:child-lcm" }} + dhcp4: 
false + dhcp6: false + gateway4: {{ gateway_from_subnet "child-lcm" }} + match: + macaddress: {{ mac 0 }} + nameservers: + addresses: {{ nameservers_from_subnet "child-lcm" }} + set-name: mcc-lcm + {{ nic 1 }}: + dhcp4: false + dhcp6: false + match: + macaddress: {{ mac 1 }} + set-name: mcc-openstack +{%- endraw %} +{#- end protect go-template below from Jinja #} diff --git a/templates/2.28.0/child/kaascephcluster.yaml.template b/templates/2.28.0/child/kaascephcluster.yaml.template new file mode 100644 index 0000000..c297a6c --- /dev/null +++ b/templates/2.28.0/child/kaascephcluster.yaml.template @@ -0,0 +1,99 @@ +apiVersion: kaas.mirantis.com/v1alpha1 +kind: KaaSCephCluster +metadata: + name: ceph-{{ MCC_CHILD_CLUSTER_NAME }} + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} +spec: + cephClusterSpec: + network: + clusterNet: "{{ NETWORK_LCM_SUBNET }}" + publicNet: "{{ NETWORK_LCM_SUBNET }}" + nodes: + child-control-0: + roles: + - mgr + - mon + - osd + storageDevices: + - config: + deviceClass: ssd + name: sdb + child-control-1: + roles: + - mon + - mgr + - osd + storageDevices: + - config: + deviceClass: ssd + name: sdb + child-control-2: + roles: + - mgr + - mon + - osd + storageDevices: + - config: + deviceClass: ssd + name: sdb + objectStorage: + rgw: + dataPool: + deviceClass: ssd + replicated: + size: 2 + failureDomain: host + gateway: + allNodes: false + instances: 2 + port: 80 + securePort: 8443 + metadataPool: + deviceClass: ssd + failureDomain: host + replicated: + size: 2 + name: openstack-store + preservePoolsOnDelete: false + pools: + - default: true + deviceClass: ssd + name: kubernetes + replicated: + size: 2 + role: kubernetes + - default: false + deviceClass: ssd + name: volumes + replicated: + size: 2 + role: volumes + - default: false + deviceClass: ssd + name: vms + replicated: + size: 2 + role: vms + - default: false + deviceClass: ssd + name: backup + replicated: + size: 2 + role: backup + - default: false + deviceClass: ssd + name: images + 
replicated: + size: 2 + role: images + - default: false + deviceClass: ssd + name: other + replicated: + size: 2 + role: other + rookConfig: + osd_pool_default_size: "2" + k8sCluster: + name: {{ MCC_CHILD_CLUSTER_NAME }} + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} diff --git a/templates/2.28.0/child/machines.yaml.template b/templates/2.28.0/child/machines.yaml.template new file mode 100644 index 0000000..4f852bd --- /dev/null +++ b/templates/2.28.0/child/machines.yaml.template @@ -0,0 +1,111 @@ +apiVersion: "cluster.k8s.io/v1alpha1" +kind: MachineList +items: +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: child-control-0 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: &cp_control_labels + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_CHILD_CLUSTER_NAME }} + cluster.sigs.k8s.io/control-plane: "true" + spec: + providerSpec: + value: &cp_control_values + apiVersion: "baremetal.k8s.io/v1alpha1" + kind: "BareMetalMachineProviderSpec" + hostSelector: + matchLabels: + baremetal: hw-child-control-0 + nodeLabels: + - key: openstack-control-plane + value: enabled + - key: openvswitch + value: enabled + - key: openstack-gateway + value: enabled + +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: child-control-1 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + <<: *cp_control_labels + spec: + providerSpec: + value: + <<: *cp_control_values + hostSelector: + matchLabels: + baremetal: hw-child-control-1 + +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: child-control-2 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + <<: *cp_control_labels + spec: + providerSpec: + value: + <<: *cp_control_values + hostSelector: + matchLabels: + baremetal: hw-child-control-2 + +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: child-worker-0 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: &cp_worker_labels + 
kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_CHILD_CLUSTER_NAME }} + spec: + providerSpec: + value: &cp_worker_values + apiVersion: "baremetal.k8s.io/v1alpha1" + kind: "BareMetalMachineProviderSpec" + hostSelector: + matchLabels: + baremetal: hw-child-worker-0 + nodeLabels: + - key: stacklight + value: enabled + - key: openstack-compute-node + value: enabled + - key: openvswitch + value: enabled + +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: child-worker-1 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + <<: *cp_worker_labels + spec: + providerSpec: + value: + <<: *cp_worker_values + hostSelector: + matchLabels: + baremetal: hw-child-worker-1 + +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: child-worker-2 + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} + labels: + <<: *cp_worker_labels + spec: + providerSpec: + value: + <<: *cp_worker_values + hostSelector: + matchLabels: + baremetal: hw-child-worker-2 diff --git a/templates/2.28.0/child/metallbconfig.yaml.template b/templates/2.28.0/child/metallbconfig.yaml.template new file mode 100644 index 0000000..173cdf0 --- /dev/null +++ b/templates/2.28.0/child/metallbconfig.yaml.template @@ -0,0 +1,44 @@ +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: MetalLBConfig +metadata: + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_CHILD_CLUSTER_NAME }} + name: {{ MCC_CHILD_CLUSTER_NAME }}-metallb + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} +spec: + ipAddressPools: + - name: default + spec: + addresses: + - {{ NETWORK_LCM_METALLB_RANGE_CHILD }} + autoAssign: true + avoidBuggyIPs: false + - name: openstack-lb + spec: + addresses: + - {{ NETWORK_LCM_METALLB_OPENSTACK_ADDRESS }}/32 + autoAssign: true + serviceAllocation: + namespaces: + - openstack + priority: 42 + serviceSelectors: + - matchExpressions: + - key: application + operator: In + values: + - ingress + - key: 
component + operator: In + values: + - server + l2Advertisements: + - name: default + spec: + interfaces: + - mcc-lcm + ipAddressPools: + - default + - openstack-lb diff --git a/templates/2.28.0/child/osdpl.yaml.template b/templates/2.28.0/child/osdpl.yaml.template new file mode 100644 index 0000000..cc69044 --- /dev/null +++ b/templates/2.28.0/child/osdpl.yaml.template @@ -0,0 +1,59 @@ +apiVersion: lcm.mirantis.com/v1alpha1 +kind: OpenStackDeployment +metadata: + name: osh-dev + namespace: openstack +spec: + internal_domain_name: cluster.local + public_domain_name: {{ MCC_OPENSTACK_PUBLIC_DOMAIN }} + openstack_version: {{ MCC_CHILD_OPENSTACK_RELEASE }} + local_volume_storage_class: openstack-operator-bind-mounts + persistent_volume_storage_class: kubernetes-ssd + preset: compute + size: tiny + features: + ssl: + public_endpoints: + api_cert: + value_from: + secret_key_ref: + key: api_cert + name: osh-dev-hidden + api_key: + value_from: + secret_key_ref: + key: api_key + name: osh-dev-hidden + ca_cert: + value_from: + secret_key_ref: + key: ca_cert + name: osh-dev-hidden + nova: + live_migration_interface: mcc-lcm + images: + backend: ceph + encryption: + enabled: false + neutron: + dns_servers: + {%- for server in NAMESERVERS.split(',') %} + - {{ server -}} + {% endfor %} + external_networks: + - physnet: physnet1 + interface: mcc-openstack + bridge: br-ex + network_types: + - flat + vlan_ranges: null + mtu: null + floating_network: + physnet: physnet1 + enabled: True + subnet: + gateway: "{{ NETWORK_OPENSTACK_GATEWAY }}" + pool_start: "{{ network_openstack_range_start }}" + pool_end: "{{ network_openstack_range_end }}" + range: "{{ NETWORK_OPENSTACK_SUBNET }}" + tunnel_interface: mcc-lcm diff --git a/templates/2.28.0/child/sshkey.yaml.template b/templates/2.28.0/child/sshkey.yaml.template new file mode 100644 index 0000000..250a197 --- /dev/null +++ b/templates/2.28.0/child/sshkey.yaml.template @@ -0,0 +1,8 @@ +apiVersion: kaas.mirantis.com/v1alpha1 +kind: 
PublicKey +metadata: + name: user-key + namespace: {{ MCC_CHILD_CLUSTER_NAMESPACE }} +spec: + publicKey: | + {{ MCC_SSH_PUBLIC_KEY }} diff --git a/templates/2.28.0/management/baremetalhostprofiles.yaml.template b/templates/2.28.0/management/baremetalhostprofiles.yaml.template new file mode 100644 index 0000000..9b828b6 --- /dev/null +++ b/templates/2.28.0/management/baremetalhostprofiles.yaml.template @@ -0,0 +1,84 @@ +apiVersion: metal3.io/v1alpha1 +metadata: + namespace: default + name: default-simple + labels: + kaas.mirantis.com/defaultBMHProfile: 'true' +kind: BareMetalHostProfile +spec: + devices: + - device: + minSize: 120Gi + wipeDevice: + eraseMetadata: + enabled: true + partitions: + - name: bios_grub + size: 4Mi + partflags: ['bios_grub'] + - name: uefi + partflags: ['esp'] + size: 200Mi + - name: config-2 + # Size of this partition is limited to 64Mb. + size: 64Mi + - name: lvm_root_part + size: 80Gi + - name: lvm_lvp_part + size: 0 + volumeGroups: + - name: lvm_root + devices: + - partition: lvm_root_part + - name: lvm_lvp + devices: + - partition: lvm_lvp_part + logicalVolumes: + - name: root + vg: lvm_root + size: 0 + - name: lvp + vg: lvm_lvp + size: 0 + fileSystems: + - fileSystem: vfat + partition: config-2 + - fileSystem: vfat + partition: uefi + mountPoint: /boot/efi + - fileSystem: ext4 + logicalVolume: root + mountPoint: / + mountOpts: 'rw,noatime,nodiratime,lazytime,nobarrier,commit=240,data=ordered' + - fileSystem: ext4 + logicalVolume: lvp + mountPoint: /mnt/local-volumes/ + mountOpts: 'rw,noatime,nodiratime,lazytime,nobarrier,commit=240,data=ordered' + preDeployScript: | + #!/bin/bash -ex + echo $(date) 'pre_deploy_script done' >> /root/pre_deploy_done + postDeployScript: | + #!/bin/bash -ex + echo "root:r00tme" | sudo chpasswd + echo "blacklist acpi_power_meter" > /etc/modprobe.d/hwmon.conf + ln -sf /dev/null /etc/systemd/system/ondemand.service + echo $(date) 'post_deploy_script done' >> /root/post_deploy_done + grubConfig: + 
defaultGrubOptions: + - 'GRUB_DISABLE_RECOVERY="true"' + - 'GRUB_PRELOAD_MODULES=lvm' + - 'GRUB_TIMEOUT=20' + - 'GRUB_TERMINAL_INPUT="console serial"' + - 'GRUB_TERMINAL_OUTPUT="gfxterm serial"' + - 'GRUB_SERIAL_COMMAND="serial --unit=0 --speed=9600"' + - 'GRUB_CMDLINE_LINUX="noibrs noibpb nopti nospectre_v2 nospectre_v1 l1tf=off nospec_store_bypass_disable no_stf_barrier mds=off tsx=on tsx_async_abort=off mitigations=off systemd.journald.forward_to_console=yes console=ttyS0,9600 console=tty0"' + kernelParameters: + sysctl: + kernel.dmesg_restrict: "1" + kernel.core_uses_pid: "1" + fs.file-max: "9223372036854775807" + fs.aio-max-nr: "1048576" + fs.inotify.max_user_instances: "4096" + vm.max_map_count: "262144" + net.ipv4.conf.all.rp_filter: "0" + net.ipv4.conf.default.rp_filter: "0" diff --git a/templates/2.28.0/management/baremetalhosts.yaml.template b/templates/2.28.0/management/baremetalhosts.yaml.template new file mode 100644 index 0000000..bf50e10 --- /dev/null +++ b/templates/2.28.0/management/baremetalhosts.yaml.template @@ -0,0 +1,87 @@ +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: master-0-bmc-credentials + namespace: default + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: master-1-bmc-credentials + namespace: default + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BareMetalHostCredential +metadata: + name: master-2-bmc-credentials + namespace: default + labels: + kaas.mirantis.com/provider: baremetal +spec: + username: "admin" + password: + value: "password" +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: master-0 + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-master-0 + annotations: + 
"kaas.mirantis.com/baremetalhost-credentials-name": "master-0-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ mgmt_node_mac_address_0 }}" + bmc: + address: 127.0.0.1:6230 + credentialsName: '' +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: master-1 + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-master-1 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "master-1-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ mgmt_node_mac_address_1 }}" + bmc: + address: 127.0.0.1:6231 + credentialsName: '' +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: master-2 + labels: + kaas.mirantis.com/provider: baremetal + baremetal: hw-master-2 + annotations: + "kaas.mirantis.com/baremetalhost-credentials-name": "master-2-bmc-credentials" +spec: + bootMode: legacy + online: true + bootMACAddress: "{{ mgmt_node_mac_address_2 }}" + bmc: + address: 127.0.0.1:6232 + credentialsName: '' diff --git a/templates/2.28.0/management/bootstrapregion.yaml.template b/templates/2.28.0/management/bootstrapregion.yaml.template new file mode 100644 index 0000000..687e29b --- /dev/null +++ b/templates/2.28.0/management/bootstrapregion.yaml.template @@ -0,0 +1,7 @@ +apiVersion: kaas.mirantis.com/v1alpha1 +kind: BootstrapRegion +metadata: + name: region-one + namespace: default +spec: + provider: baremetal diff --git a/templates/2.28.0/management/cluster.yaml.template b/templates/2.28.0/management/cluster.yaml.template new file mode 100644 index 0000000..b25d547 --- /dev/null +++ b/templates/2.28.0/management/cluster.yaml.template @@ -0,0 +1,74 @@ +--- +apiVersion: cluster.k8s.io/v1alpha1 +kind: Cluster +metadata: + name: {{ MCC_MGMT_CLUSTER_NAME }} + labels: + kaas.mirantis.com/provider: baremetal +spec: + clusterNetwork: + services: + cidrBlocks: + - 10.233.0.0/18 + pods: + cidrBlocks: + - 10.233.64.0/18 + providerSpec: + value: + apiVersion: 
baremetal.k8s.io/v1alpha1 + kind: BaremetalClusterProviderSpec + nodeCidr: 10.10.10.0/24 + dedicatedControlPlane: false + dedicatedMetallbPools: true + helmReleases: + - name: metallb + values: {} + - name: stacklight + values: + elasticsearch: + persistentVolumeClaimSize: 30Gi + highAvailabilityEnabled: true + logging: + enabled: false + prometheusServer: + persistentVolumeClaimSize: 16Gi + publicKeys: + - name: user-key + - name: bootstrap-key + kaas: + regional: + - provider: baremetal + helmReleases: + - name: baremetal-provider + values: + config: + customHostnamesEnabled: true + {%- if NTP_SERVERS is defined and NTP_SERVERS != '' %} + lcm: + ntp: + servers: + {%- for server in NTP_SERVERS.split(',') %} + - {{ server -}} + {% endfor %} + {%- endif %} + - name: baremetal-operator + values: + dhcp_relay: + enable: true + virtualbmc_vsphere: + enabled: true + dnsmasq: + dynamic_bootp: true + - name: kaas-ipam + values: {} + management: + enabled: true + {%- if MCC_RELEASES_URL is defined and MCC_RELEASES_URL != '' %} + helmReleases: + - name: release-controller + values: + releasesBaseUrl: {{ MCC_RELEASES_URL }}/releases + - name: diagnostic-controller + values: + releasesBaseUrl: {{ MCC_RELEASES_URL }}/releases + {%- endif %} diff --git a/templates/2.28.0/management/ipam-objects.yaml.template b/templates/2.28.0/management/ipam-objects.yaml.template new file mode 100644 index 0000000..e0b49a6 --- /dev/null +++ b/templates/2.28.0/management/ipam-objects.yaml.template @@ -0,0 +1,110 @@ +--- +apiVersion: "ipam.mirantis.com/v1alpha1" +kind: Subnet +metadata: + name: mgmt-pxe + namespace: default + labels: + kaas.mirantis.com/provider: baremetal + ipam/SVC-dhcp-range: "presents" +spec: + cidr: {{ NETWORK_PXE_SUBNET }} + includeRanges: + - {{ NETWORK_PXE_DHCP_RANGE }} + +--- +apiVersion: "ipam.mirantis.com/v1alpha1" +kind: Subnet +metadata: + name: mgmt-pxe-nics + namespace: default + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: 
{{ MCC_MGMT_CLUSTER_NAME }} + ipam/SVC-pxe-nics: "presents" +spec: + cidr: {{ NETWORK_PXE_SUBNET }} + includeRanges: + - {{ NETWORK_PXE_STATIC_RANGE_MGMT }} + +--- +apiVersion: "ipam.mirantis.com/v1alpha1" +kind: Subnet +metadata: + name: mgmt-k8s-api-lb + namespace: default + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_MGMT_CLUSTER_NAME }} + ipam/SVC-LBhost: "presents" +spec: + cidr: {{ NETWORK_LCM_MGMT_LB_HOST }}/32 + useWholeCidr: true + +--- +apiVersion: "ipam.mirantis.com/v1alpha1" +kind: Subnet +metadata: + name: mgmt-lcm + namespace: default + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_MGMT_CLUSTER_NAME }} + ipam/SVC-k8s-lcm: "presents" +spec: + cidr: {{ NETWORK_LCM_SUBNET }} + gateway: {{ NETWORK_LCM_GATEWAY }} + nameservers: + {%- for server in NAMESERVERS.split(',') %} + - {{ server -}} + {% endfor %} + includeRanges: + - {{ NETWORK_LCM_STATIC_RANGE_MGMT }} + +--- +apiVersion: ipam.mirantis.com/v1alpha1 +kind: L2Template +metadata: + name: default + namespace: default + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_MGMT_CLUSTER_NAME }} + ipam/DefaultForCluster: "1" +spec: + autoIfMappingPrio: + - eth + - eno + - ens + - enp + l3Layout: + - scope: namespace + subnetName: mgmt-pxe-nics + labelSelector: + kaas.mirantis.com/provider: baremetal + ipam/SVC-pxe-nics: "presents" + - scope: namespace + subnetName: mgmt-lcm + labelSelector: + kaas.mirantis.com/provider: baremetal + ipam/SVC-k8s-lcm: "presents" +{#- protect go-template below from Jinja #} +{%- raw %} + npTemplate: | + version: 2 + renderer: networkd + ethernets: + {{ nic 0 }}: + addresses: + - {{ ip "0:mgmt-lcm" }} + - {{ ip "pxe:mgmt-pxe-nics" }} + dhcp4: false + dhcp6: false + gateway4: {{ gateway_from_subnet "mgmt-lcm" }} + match: + macaddress: {{ mac 0 }} + nameservers: + addresses: {{ nameservers_from_subnet "mgmt-lcm" }} + set-name: mcc-lcm +{%- endraw %} 
+{#- end protect go-template below from Jinja #} diff --git a/templates/2.28.0/management/machines.yaml.template b/templates/2.28.0/management/machines.yaml.template new file mode 100644 index 0000000..9b7c8cc --- /dev/null +++ b/templates/2.28.0/management/machines.yaml.template @@ -0,0 +1,47 @@ +apiVersion: "cluster.k8s.io/v1alpha1" +kind: MachineList +items: +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: master-0 + labels: &cp_labels + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_MGMT_CLUSTER_NAME }} + cluster.sigs.k8s.io/control-plane: "true" + spec: + providerSpec: + value: &cp_value + apiVersion: "baremetal.k8s.io/v1alpha1" + kind: "BareMetalMachineProviderSpec" + hostSelector: + matchLabels: + baremetal: hw-master-0 + +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: master-1 + labels: + <<: *cp_labels + spec: + providerSpec: + value: + <<: *cp_value + hostSelector: + matchLabels: + baremetal: hw-master-1 + +- apiVersion: "cluster.k8s.io/v1alpha1" + kind: Machine + metadata: + name: master-2 + labels: + <<: *cp_labels + spec: + providerSpec: + value: + <<: *cp_value + hostSelector: + matchLabels: + baremetal: hw-master-2 diff --git a/templates/2.28.0/management/metallbconfig.yaml.template b/templates/2.28.0/management/metallbconfig.yaml.template new file mode 100644 index 0000000..4166808 --- /dev/null +++ b/templates/2.28.0/management/metallbconfig.yaml.template @@ -0,0 +1,36 @@ +--- +apiVersion: kaas.mirantis.com/v1alpha1 +kind: MetalLBConfig +metadata: + labels: + kaas.mirantis.com/provider: baremetal + cluster.sigs.k8s.io/cluster-name: {{ MCC_MGMT_CLUSTER_NAME }} + name: {{ MCC_MGMT_CLUSTER_NAME }}-metallb + namespace: default +spec: + ipAddressPools: + - name: default + spec: + addresses: + - {{ NETWORK_LCM_METALLB_RANGE_MGMT }} + autoAssign: true + avoidBuggyIPs: false + - name: services-pxe + spec: + addresses: + - {{ NETWORK_PXE_METALLB_RANGE }} + autoAssign: 
false + avoidBuggyIPs: false + l2Advertisements: + - name: default + spec: + interfaces: + - mcc-lcm + ipAddressPools: + - default + - name: pxe + spec: + interfaces: + - mcc-lcm + ipAddressPools: + - services-pxe \ No newline at end of file diff --git a/templates/2.28.0/management/serviceusers.yaml.template b/templates/2.28.0/management/serviceusers.yaml.template new file mode 100644 index 0000000..3d492fb --- /dev/null +++ b/templates/2.28.0/management/serviceusers.yaml.template @@ -0,0 +1,10 @@ +apiVersion: kaas.mirantis.com/v1alpha1 +kind: ServiceUserList +items: +- apiVersion: kaas.mirantis.com/v1alpha1 + kind: ServiceUser + metadata: + name: serviceuser + spec: + password: + value: {{ MCC_SERVICEUSER_PASSWORD }} diff --git a/templates/2.28.0/management/sshkey.yaml.template b/templates/2.28.0/management/sshkey.yaml.template new file mode 100644 index 0000000..6dee3e4 --- /dev/null +++ b/templates/2.28.0/management/sshkey.yaml.template @@ -0,0 +1,8 @@ +apiVersion: kaas.mirantis.com/v1alpha1 +kind: PublicKey +metadata: + name: user-key + namespace: default +spec: + publicKey: | + {{ MCC_SSH_PUBLIC_KEY }} diff --git a/templates/2.28.0/management/vbmc.yaml.template b/templates/2.28.0/management/vbmc.yaml.template new file mode 100644 index 0000000..e0eac9f --- /dev/null +++ b/templates/2.28.0/management/vbmc.yaml.template @@ -0,0 +1,108 @@ +--- +apiVersion: metal3.io/v1alpha1 +kind: VBMC +metadata: + labels: + vbmc.metal3.io/active: "1" + name: vbmc-vsphere-config + namespace: kaas +spec: + default: + configDir: /vbmc + ipmi: + sessionTimeout: 20 + log: + debug: true + domains: + - username: admin + password: password + address: 127.0.0.1 + port: 6230 + domainName: {{ vm_name_prefix_tmpl }}mgmt-master-0 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6231 + domainName: {{ vm_name_prefix_tmpl }}mgmt-master-1 
+ vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6232 + domainName: {{ vm_name_prefix_tmpl }}mgmt-master-2 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6240 + domainName: {{ vm_name_prefix_tmpl }}child-control-0 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6241 + domainName: {{ vm_name_prefix_tmpl }}child-control-1 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6242 + domainName: {{ vm_name_prefix_tmpl }}child-control-2 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6250 + domainName: {{ vm_name_prefix_tmpl }}child-worker-0 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6251 + domainName: {{ vm_name_prefix_tmpl }}child-worker-1 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + active: True + - username: admin + password: password + address: 127.0.0.1 + port: 6252 + domainName: {{ vm_name_prefix_tmpl }}child-worker-2 + vsphereAddress: {{ VSPHERE_SERVER }} + vsphereUsername: {{ VSPHERE_USERNAME }} + vspherePassword: {{ VSPHERE_PASSWORD }} + 
active: True +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: config + app.kubernetes.io/name: vbmc + app.kubernetes.io/part-of: kaas-bm + name: vbmc-vsphere-config + namespace: kaas +data: {} diff --git a/templates/2.28.1 b/templates/2.28.1 new file mode 120000 index 0000000..67d9e1b --- /dev/null +++ b/templates/2.28.1 @@ -0,0 +1 @@ +2.28.0 \ No newline at end of file diff --git a/templates/2.28.2 b/templates/2.28.2 new file mode 120000 index 0000000..67d9e1b --- /dev/null +++ b/templates/2.28.2 @@ -0,0 +1 @@ +2.28.0 \ No newline at end of file diff --git a/templates/2.28.3 b/templates/2.28.3 new file mode 120000 index 0000000..67d9e1b --- /dev/null +++ b/templates/2.28.3 @@ -0,0 +1 @@ +2.28.0 \ No newline at end of file diff --git a/templates/2.28.4 b/templates/2.28.4 new file mode 120000 index 0000000..67d9e1b --- /dev/null +++ b/templates/2.28.4 @@ -0,0 +1 @@ +2.28.0 \ No newline at end of file diff --git a/templates/2.28.5 b/templates/2.28.5 new file mode 120000 index 0000000..67d9e1b --- /dev/null +++ b/templates/2.28.5 @@ -0,0 +1 @@ +2.28.0 \ No newline at end of file diff --git a/templates/2.29.0 b/templates/2.29.0 new file mode 120000 index 0000000..67d9e1b --- /dev/null +++ b/templates/2.29.0 @@ -0,0 +1 @@ +2.28.0 \ No newline at end of file diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..96c2498 --- /dev/null +++ b/tox.ini @@ -0,0 +1,33 @@ +# Tox (http://tox.testrun.org/) is a tool for running tests +# in multiple virtualenvs. This configuration file will run the +# test suite on all supported python versions. To use it, "pip install tox" +# and then run "tox" from this directory. 
+ +[tox] +skipsdist = True +toxworkdir = {env:TOXWORKINGDIR:{toxinidir}/.tox} +envlist = pep8,shellcheck + +[testenv:pep8] +skip_install = True +deps = + flake8==5.0.4 +usedevelop = False +commands = + flake8 {posargs:./bin} + +[testenv:shellcheck] +allowlist_externals = bash +skip_install = True +deps = + shellcheck-py +usedevelop = False +commands = bash -c "find . -name '*.sh' \ + -print0 | xargs -0 shellcheck -s bash" + +[flake8] +exclude = .venv,.git,.tox,dist,doc,*egg,build,local,./lib +show-pep8 = True +show-source = True +count = True +max-line-length = 120 diff --git a/userdata.yaml.tmpl b/userdata.yaml.tmpl new file mode 100644 index 0000000..40fb17d --- /dev/null +++ b/userdata.yaml.tmpl @@ -0,0 +1,26 @@ +#cloud-config +fqdn: mcc-seed +users: +- gecos: {{ SEED_NODE_USER }} + groups: sudo, adm, docker + name: {{ SEED_NODE_USER }} + lock_passwd: false + shell: /bin/bash + ssh_authorized_keys: + - | + {{ MCC_SSH_PUBLIC_KEY }} + sudo: ALL=(ALL) NOPASSWD:ALL +chpasswd: + expire: false + users: + - name: {{ SEED_NODE_USER }} + password: {{ SEED_NODE_PWD }} + type: text +{% if NTP_SERVERS is defined and NTP_SERVERS != '' %} +ntp: + enabled: true + servers: + {%- for server in NTP_SERVERS.split(',') %} + - {{ server -}} + {% endfor %} +{% endif %}