
set -e

# Name of the k3d cluster and the kubectl context k3d generates for it
# (k3d always prefixes contexts with "k3d-").
k3d_cluster_name="ess-helm"
k3d_context_name="k3d-$k3d_cluster_name"
# Space separated list of namespaces to use
ess_namespaces=${ESS_NAMESPACES:-ess}

# Anchor all paths at the repo root so the script works from any CWD;
# generated CA material is cached under .ca/ between runs.
root_folder="$(git rev-parse --show-toplevel)"
ca_folder="$root_folder/.ca"
mkdir -p "$ca_folder"
1818
19- if docker ps -a | grep " ${kind_cluster_name} -registry" ; then
20- docker stop " ${kind_cluster_name} -registry" || true
21- docker rm " ${kind_cluster_name} -registry" || true
22- fi
23- if kind get clusters 2> /dev/null | grep " $kind_cluster_name " ; then
24- echo " Cluster '$kind_cluster_name ' is already provisioned by Kind"
19+ if k3d cluster list 2> /dev/null | grep " $k3d_cluster_name " ; then
20+ echo " Cluster '$k3d_cluster_name ' is already provisioned by k3d"
2521else
26- echo " Creating new Kind cluster '$kind_cluster_name '"
27- (cd " $root_folder /tests/integration/fixtures/files/clusters " ; kind create cluster --name " $kind_cluster_name " --config " kind .yml" )
22+ echo " Creating new k3d cluster '$k3d_cluster_name '"
23+ k3d cluster create " $k3d_cluster_name " --config " tests/integration/fixtures/files/clusters/k3d .yml"
2824fi

# Install the Prometheus Operator CRDs so charts can create ServiceMonitors etc.
helm --kube-context "$k3d_context_name" upgrade -i prometheus-operator-crds --repo https://prometheus-community.github.io/helm-charts prometheus-operator-crds \
    --namespace prometheus-operator \
    --create-namespace

# cert-manager issues the test CA and all TLS certificates used by the tests
helm --kube-context "$k3d_context_name" upgrade -i cert-manager --repo https://charts.jetstack.io cert-manager \
    --namespace cert-manager \
    --create-namespace \
    -f "$root_folder/tests/integration/fixtures/files/charts/cert-manager.yml"
5234
5335# Create a new CA certificate
5436if [[ ! -f " $ca_folder " /ca.crt || ! -f " $ca_folder " /ca.pem ]]; then
55- cat << EOF | kubectl --context $kind_context_name apply -f -
37+ cat << EOF | kubectl --context $k3d_context_name apply -f -
5638---
5739apiVersion: cert-manager.io/v1
5840kind: ClusterIssuer
@@ -80,19 +62,19 @@ spec:
8062 group: cert-manager.io
8163---
8264EOF
83- kubectl --context $kind_context_name -n cert-manager wait --for condition=Ready Certificate/ess-ca
65+ kubectl --context $k3d_context_name -n cert-manager wait --for condition=Ready Certificate/ess-ca
8466else
85- kubectl --context $kind_context_name delete ClusterIssuer ess-ca 2> /dev/null || true
86- kubectl --context $kind_context_name -n cert-manager delete Certificate ess-ca 2> /dev/null || true
87- kubectl --context $kind_context_name -n cert-manager delete Secret ess-ca 2> /dev/null || true
88- kubectl --context $kind_context_name -n cert-manager create secret generic ess-ca \
67+ kubectl --context $k3d_context_name delete ClusterIssuer ess-ca 2> /dev/null || true
68+ kubectl --context $k3d_context_name -n cert-manager delete Certificate ess-ca 2> /dev/null || true
69+ kubectl --context $k3d_context_name -n cert-manager delete Secret ess-ca 2> /dev/null || true
70+ kubectl --context $k3d_context_name -n cert-manager create secret generic ess-ca \
8971 --type=kubernetes.io/tls \
9072 --from-file=tls.crt=" $ca_folder " /ca.crt \
9173 --from-file=tls.key=" $ca_folder " /ca.pem \
9274 --from-file=ca.crt=" $ca_folder " /ca.crt
9375fi
9476
95- cat << EOF | kubectl --context $kind_context_name apply -f -
77+ cat << EOF | kubectl --context $k3d_context_name apply -f -
9678apiVersion: cert-manager.io/v1
9779kind: ClusterIssuer
9880metadata:
@@ -103,15 +85,15 @@ spec:
10385EOF

# Export the freshly generated CA from the cluster only when we don't already
# have it locally (the jsonpath keys escape the '.' in the secret data keys).
if [[ ! -f "$ca_folder"/ca.crt || ! -f "$ca_folder"/ca.pem ]]; then
    kubectl --context "$k3d_context_name" -n cert-manager get secret ess-ca -o jsonpath="{.data['ca\.crt']}" | base64 -d > "$ca_folder"/ca.crt
    kubectl --context "$k3d_context_name" -n cert-manager get secret ess-ca -o jsonpath="{.data['tls\.key']}" | base64 -d > "$ca_folder"/ca.pem
fi
10991
11092for namespace in $ess_namespaces ; do
11193 echo " Constructing ESS dependencies in $namespace "
112- server_version=$( kubectl --context $kind_context_name version | grep Server | sed ' s/.*v/v/' | awk -F. ' {print $1"."$2}' )
94+ server_version=$( kubectl --context $k3d_context_name version | grep Server | sed ' s/.*v/v/' | awk -F. ' {print $1"."$2}' )
11395 # We don't turn on enforce here as people may be experimenting but we do turn on warn so people see the warnings when helm install/upgrade
114- cat << EOF | kubectl --context $kind_context_name apply -f -
96+ cat << EOF | kubectl --context $k3d_context_name apply -f -
11597apiVersion: v1
11698kind: Namespace
11799metadata:
0 commit comments