
Commit 8cd249d

Use k3d instead of kind

1 parent 49fd92d

17 files changed: +145 -256 lines

.github/workflows/pytest.yml

Lines changed: 11 additions & 12 deletions
@@ -75,12 +75,11 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

-      - uses: engineerd/setup-kind@ecfad61750951586a9ef973db567df1d28671bdc # v0.6.2
+      - name: Install k3d with asdf
+        uses: asdf-vm/actions/install@1902764435ca0dd2f3388eea723a4f92a4eb8302 # v4
         with:
-          version: "v0.29.0"
-          name: "ess-helm"
-          skipClusterCreation: "true"
-          skipClusterLogsExport: "true"
+          tool_versions: |
+            k3d 5.8.3

       - uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1

@@ -120,17 +119,17 @@ jobs:
         if: ${{ failure() }}
         shell: bash
         run: |
-          kind export logs --name ess-helm ./ess-helm-logs
-          kind export kubeconfig --name ess-helm
-          ns=$(kubectl --context kind-ess-helm get ns -l app.kubernetes.io/managed-by=pytest -o jsonpath='{.items[].metadata.name}')
+          mkdir ess-helm-logs
+          k3d kubeconfig merge ess-helm -ds
+          ns=$(kubectl --context k3d-ess-helm get ns -l app.kubernetes.io/managed-by=pytest -o jsonpath='{.items[].metadata.name}')
           resources=("pods" "deployments" "statefulsets" "services" "configmaps" "ingresses" "persistentvolumes" "persistentvolumeclaims" "endpoints")
           for i in "${resources[@]}"; do
-            kubectl --context kind-ess-helm get "$i" -n "$ns" > "./ess-helm-logs/$i.txt"
+            kubectl --context k3d-ess-helm get "$i" -n "$ns" > "./ess-helm-logs/$i.txt"
             echo "----" >> "./ess-helm-logs/$i.txt"
-            kubectl --context kind-ess-helm get "$i" -o yaml -n "$ns" >> "./ess-helm-logs/$i.txt"
+            kubectl --context k3d-ess-helm get "$i" -o yaml -n "$ns" >> "./ess-helm-logs/$i.txt"
           done
-          kubectl --context kind-ess-helm get events --sort-by=.metadata.creationTimestamp -n "$ns" > ./ess-helm-logs/events.txt
-          kind delete cluster --name ess-helm
+          kubectl --context k3d-ess-helm get events --sort-by=.metadata.creationTimestamp -n "$ns" > ./ess-helm-logs/events.txt
+          k3d cluster delete ess-helm

       - name: Upload logs
         if: ${{ failure() }}

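For anyone running this failure-log step by hand: a minimal sketch of the kubeconfig handling, assuming the k3d v5 short flags -d and -s expand as below (worth verifying against "k3d kubeconfig merge --help").

    # Assumed long-form equivalent of: k3d kubeconfig merge ess-helm -ds
    k3d kubeconfig merge ess-helm \
      --kubeconfig-merge-default \
      --kubeconfig-switch-context
    # k3d names the merged context "k3d-<cluster>", hence --context k3d-ess-helm above
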
charts/matrix-stack/ci/fragments/well-known-pytest-base-extras.yaml

Lines changed: 0 additions & 3 deletions
@@ -3,9 +3,6 @@
 #
 # SPDX-License-Identifier: AGPL-3.0-only

-ingress:
-  controllerType: ingress-nginx
-
 wellKnownDelegation:
   ingress:
     tlsSecret: "{{ $.Release.Name }}-well-known-web-tls"

charts/matrix-stack/ci/pytest-matrix-rtc-synapse-wellknown-values.yaml

Lines changed: 0 additions & 2 deletions
@@ -16,8 +16,6 @@ haproxy:
   podSecurityContext:
     runAsGroup: 0
   replicas: 2
-ingress:
-  controllerType: ingress-nginx
 initSecrets:
   annotations:
     has-no-service-monitor: "true"

charts/matrix-stack/ci/pytest-well-known-values.yaml

Lines changed: 0 additions & 2 deletions
@@ -18,8 +18,6 @@ global:
 haproxy:
   podSecurityContext:
     runAsGroup: 0
-ingress:
-  controllerType: ingress-nginx
 matrixAuthenticationService:
   enabled: false
 matrixRTC:

charts/matrix-stack/ci/test-cluster-mixin.yaml

Lines changed: 1 addition & 4 deletions
@@ -10,9 +10,6 @@
 certManager:
   clusterIssuer: ess-selfsigned

-ingress:
-  controllerType: ingress-nginx
-
 matrixRTC:
   # Because the authoriser service won't trust certificates issued by the above self-signed CA
   extraEnv:
@@ -24,4 +21,4 @@ matrixRTC:
       - ess.localhost
       - mrtc.ess.localhost
       - synapse.ess.localhost
-    ip: '{{ ( (lookup "v1" "Service" "ingress-nginx" "ingress-nginx-controller") | default (dict "spec" (dict "clusterIP" "127.0.0.1")) ).spec.clusterIP }}'
+    ip: '{{ ( (lookup "v1" "Service" "kube-system" "traefik") | default (dict "spec" (dict "clusterIP" "127.0.0.1")) ).spec.clusterIP }}'

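The new ip template resolves traefik, the ingress controller that k3s (and therefore k3d) ships in the kube-system namespace; the default fallback keeps offline rendering (e.g. plain "helm template") working with 127.0.0.1. A quick sanity check against a live cluster, assuming the k3d-ess-helm context created by the scripts below:

    # Print the ClusterIP that the lookup above resolves
    kubectl --context k3d-ess-helm -n kube-system get service traefik -o jsonpath='{.spec.clusterIP}'
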
newsfragments/871.internal.md

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+CI: switch from kind to k3d for integration tests.

poetry.lock

Lines changed: 16 additions & 13 deletions
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -41,6 +41,7 @@ types-pyyaml = "^6.0.12.20250915"
 semver = "^3.0.4"
 prometheus-client = "^0.23.1"
 yamllint = "^1.37.1"
+httpx-retries = "^0.4.5"

 [build-system]
 requires = ["poetry-core>=2.1.0"]

scripts/destroy_test_cluster.sh

Lines changed: 4 additions & 11 deletions
@@ -7,17 +7,10 @@

 set -e

-kind_cluster_name="ess-helm"
+k3d_cluster_name="ess-helm"

-if kind get clusters 2> /dev/null| grep "$kind_cluster_name"; then
-  kind delete cluster --name $kind_cluster_name
+if k3d cluster list 2> /dev/null | grep "$k3d_cluster_name"; then
+  k3d cluster delete $k3d_cluster_name
 else
-  echo "Kind cluster ${kind_cluster_name} already destoryed"
-fi
-
-if docker ps -a | grep "${kind_cluster_name}-registry"; then
-  docker stop "${kind_cluster_name}-registry" || true
-  docker rm "${kind_cluster_name}-registry" || true
-else
-  echo "Kind cluster's local registry already destroyed"
+  echo "k3d cluster ${k3d_cluster_name} already destroyed"
 fi

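Both scripts gate on a plain grep of "k3d cluster list", so re-running either is safe. The guard in isolation (note it substring-matches, so a hypothetical cluster named ess-helm-2 would also satisfy it):

    # Exits 0 and echoes the matching row when the cluster exists
    k3d cluster list 2>/dev/null | grep "ess-helm"
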
scripts/setup_test_cluster.sh

Lines changed: 19 additions & 37 deletions
@@ -7,52 +7,34 @@

 set -e

-kind_cluster_name="ess-helm"
-kind_context_name="kind-$kind_cluster_name"
+k3d_cluster_name="ess-helm"
+k3d_context_name="k3d-$k3d_cluster_name"
 # Space separated list of namespaces to use
 ess_namespaces=${ESS_NAMESPACES:-ess}

 root_folder="$(git rev-parse --show-toplevel)"
 ca_folder="$root_folder/.ca"
 mkdir -p "$ca_folder"

-if docker ps -a | grep "${kind_cluster_name}-registry"; then
-  docker stop "${kind_cluster_name}-registry" || true
-  docker rm "${kind_cluster_name}-registry" || true
-fi
-if kind get clusters 2>/dev/null | grep "$kind_cluster_name"; then
-  echo "Cluster '$kind_cluster_name' is already provisioned by Kind"
+if k3d cluster list 2>/dev/null | grep "$k3d_cluster_name"; then
+  echo "Cluster '$k3d_cluster_name' is already provisioned by k3d"
 else
-  echo "Creating new Kind cluster '$kind_cluster_name'"
-  (cd "$root_folder/tests/integration/fixtures/files/clusters"; kind create cluster --name "$kind_cluster_name" --config "kind.yml")
+  echo "Creating new k3d cluster '$k3d_cluster_name'"
+  k3d cluster create "$k3d_cluster_name" --config "tests/integration/fixtures/files/clusters/k3d.yml"
 fi

-network=$(docker inspect $kind_cluster_name-control-plane | jq '.[0].NetworkSettings.Networks | keys | .[0]' -r)
-docker run \
-  -d --restart=always -p "127.0.0.1:5000:5000" --network "$network" --network-alias "registry" --name "${kind_cluster_name}-registry" \
-  registry:2
-
-helm --kube-context $kind_context_name upgrade -i ingress-nginx --repo https://kubernetes.github.io/ingress-nginx ingress-nginx \
-  --namespace ingress-nginx \
-  --create-namespace \
-  -f "$root_folder/tests/integration/fixtures/files/charts/ingress-nginx.yml"
-
-helm --kube-context $kind_context_name upgrade -i metrics-server --repo https://kubernetes-sigs.github.io/metrics-server metrics-server \
-  --namespace kube-system \
-  -f "$root_folder/tests/integration/fixtures/files/charts/metrics-server.yml"
-
-helm --kube-context $kind_context_name upgrade -i prometheus-operator-crds --repo https://prometheus-community.github.io/helm-charts prometheus-operator-crds \
+helm --kube-context $k3d_context_name upgrade -i prometheus-operator-crds --repo https://prometheus-community.github.io/helm-charts prometheus-operator-crds \
   --namespace prometheus-operator \
   --create-namespace

-helm --kube-context $kind_context_name upgrade -i cert-manager --repo https://charts.jetstack.io cert-manager \
+helm --kube-context $k3d_context_name upgrade -i cert-manager --repo https://charts.jetstack.io cert-manager \
   --namespace cert-manager \
   --create-namespace \
   -f "$root_folder/tests/integration/fixtures/files/charts/cert-manager.yml"

 # Create a new CA certificate
 if [[ ! -f "$ca_folder"/ca.crt || ! -f "$ca_folder"/ca.pem ]]; then
-  cat <<EOF | kubectl --context $kind_context_name apply -f -
+  cat <<EOF | kubectl --context $k3d_context_name apply -f -
 ---
 apiVersion: cert-manager.io/v1
 kind: ClusterIssuer
@@ -80,19 +62,19 @@ spec:
     group: cert-manager.io
 ---
 EOF
-  kubectl --context $kind_context_name -n cert-manager wait --for condition=Ready Certificate/ess-ca
+  kubectl --context $k3d_context_name -n cert-manager wait --for condition=Ready Certificate/ess-ca
 else
-  kubectl --context $kind_context_name delete ClusterIssuer ess-ca 2>/dev/null || true
-  kubectl --context $kind_context_name -n cert-manager delete Certificate ess-ca 2>/dev/null || true
-  kubectl --context $kind_context_name -n cert-manager delete Secret ess-ca 2>/dev/null || true
-  kubectl --context $kind_context_name -n cert-manager create secret generic ess-ca \
+  kubectl --context $k3d_context_name delete ClusterIssuer ess-ca 2>/dev/null || true
+  kubectl --context $k3d_context_name -n cert-manager delete Certificate ess-ca 2>/dev/null || true
+  kubectl --context $k3d_context_name -n cert-manager delete Secret ess-ca 2>/dev/null || true
+  kubectl --context $k3d_context_name -n cert-manager create secret generic ess-ca \
     --type=kubernetes.io/tls \
     --from-file=tls.crt="$ca_folder"/ca.crt \
     --from-file=tls.key="$ca_folder"/ca.pem \
     --from-file=ca.crt="$ca_folder"/ca.crt
 fi

-cat <<EOF | kubectl --context $kind_context_name apply -f -
+cat <<EOF | kubectl --context $k3d_context_name apply -f -
 apiVersion: cert-manager.io/v1
 kind: ClusterIssuer
 metadata:
@@ -103,15 +85,15 @@
 EOF

 if [[ ! -f "$ca_folder"/ca.crt || ! -f "$ca_folder"/ca.pem ]]; then
-  kubectl --context $kind_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['ca\.crt']}" | base64 -d > "$ca_folder"/ca.crt
-  kubectl --context $kind_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['tls\.key']}" | base64 -d > "$ca_folder"/ca.pem
+  kubectl --context $k3d_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['ca\.crt']}" | base64 -d > "$ca_folder"/ca.crt
+  kubectl --context $k3d_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['tls\.key']}" | base64 -d > "$ca_folder"/ca.pem
 fi

 for namespace in $ess_namespaces; do
   echo "Constructing ESS dependencies in $namespace"
-  server_version=$(kubectl --context $kind_context_name version | grep Server | sed 's/.*v/v/' | awk -F. '{print $1"."$2}')
+  server_version=$(kubectl --context $k3d_context_name version | grep Server | sed 's/.*v/v/' | awk -F. '{print $1"."$2}')
   # We don't turn on enforce here as people may be experimenting but we do turn on warn so people see the warnings when helm install/upgrade
-  cat <<EOF | kubectl --context $kind_context_name apply -f -
+  cat <<EOF | kubectl --context $k3d_context_name apply -f -
 apiVersion: v1
 kind: Namespace
 metadata:

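The create path now points k3d at tests/integration/fixtures/files/clusters/k3d.yml, whose contents are not part of this diff. Purely as a hypothetical illustration of the cluster shape such a config can encode, expressed as equivalent CLI flags (the committed k3d.yml is authoritative and may differ):

    # Hypothetical flags only -- not derived from the actual k3d.yml
    k3d cluster create ess-helm \
      --servers 1 \
      -p "80:80@loadbalancer" \
      -p "443:443@loadbalancer"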