# --- GitHub web-UI text captured alongside the workflow (not workflow data) ---
# Skip to content
#
# feat(storagebox): replace Bitnami Cassandra with K8ssandra operator #5
#
# feat(storagebox): replace Bitnami Cassandra with K8ssandra operator
#
# feat(storagebox): replace Bitnami Cassandra with K8ssandra operator #5
#
# Workflow file for this run
name: Storagebox CI

# Run on PRs and pushes to main, but only when Storagebox files change.
on:
  pull_request:
    paths:
      - 'applications/storagebox/charts/**'
      - 'applications/storagebox/kots/**'
      - 'applications/storagebox/tests/**'
      - 'applications/storagebox/Makefile'
      - '.github/workflows/storagebox-ci.yml'
  push:
    branches:
      - main
    paths:
      - 'applications/storagebox/charts/**'
      - 'applications/storagebox/kots/**'
      - 'applications/storagebox/tests/**'
      - 'applications/storagebox/Makefile'
      - '.github/workflows/storagebox-ci.yml'
jobs:
  # Static checks: lint the chart and render it against each values file.
  lint-and-template:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Helm
        uses: azure/setup-helm@v4.3.0
        with:
          version: v3.13.3
      - name: Add Helm repositories
        working-directory: applications/storagebox
        run: make add-helm-repositories
      - name: Update chart dependencies
        working-directory: applications/storagebox
        run: make update-dependencies
      - name: Helm lint
        working-directory: applications/storagebox
        run: helm lint ./charts/storagebox
      - name: Helm template (default values)
        working-directory: applications/storagebox
        run: helm template storagebox ./charts/storagebox --debug
      - name: Helm template (all-components test values)
        working-directory: applications/storagebox
        run: |
          helm template storagebox ./charts/storagebox \
            -f tests/helm/all-components.yaml \
            --debug
      - name: Validate config contract
        working-directory: applications/storagebox
        run: make validate-config
helm-install-test:
runs-on: ubuntu-22.04
needs: [lint-and-template]
strategy:
fail-fast: false
matrix:
cluster:
- distribution: k3s
version: "1.32"
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v4.3.0
with:
version: v3.13.3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.12"
- name: Create Cluster
id: create-cluster
uses: replicatedhq/replicated-actions/create-cluster@v1.17.0
with:
api-token: ${{ secrets.REPLICATED_PLATFORM_EXAMPLES_TOKEN }}
kubernetes-distribution: ${{ matrix.cluster.distribution }}
kubernetes-version: ${{ matrix.cluster.version }}
cluster-name: storagebox-ci-${{ github.run_id }}-${{ matrix.cluster.distribution }}-${{ matrix.cluster.version }}
disk: 100
nodes: 1
instance-type: r1.medium
ttl: 2h
export-kubeconfig: true
- name: Save kubeconfig
run: |
mkdir -p /tmp/storagebox-ci
echo "$KUBECONFIG_DATA" > /tmp/storagebox-ci/kubeconfig
echo "KUBECONFIG=/tmp/storagebox-ci/kubeconfig" >> $GITHUB_ENV
env:
KUBECONFIG_DATA: ${{ steps.create-cluster.outputs.cluster-kubeconfig }}
- name: Add Helm repositories
working-directory: applications/storagebox
run: |
make add-helm-repositories
# Operator repos (from ec.yaml) not in Chart.yaml dependencies
helm repo add jetstack https://charts.jetstack.io || true
helm repo add cnpg https://cloudnative-pg.github.io/charts || true
helm repo add minio-operator https://operator.min.io || true
helm repo add k8ssandra https://helm.k8ssandra.io/stable || true
helm repo update
- name: Update chart dependencies
working-directory: applications/storagebox
run: make update-dependencies
# Install all operators in parallel (versions from kots/ec.yaml)
- name: Install operators
run: |
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager --create-namespace \
--version v1.19.1 \
--set crds.enabled=true \
--set prometheus.enabled=false &
helm install cloudnative-pg cnpg/cloudnative-pg \
--namespace cnpg --create-namespace \
--version 0.27.0 &
helm install minio-operator minio-operator/operator \
--namespace minio --create-namespace \
--version 7.1.1 &
helm install k8ssandra-operator k8ssandra/k8ssandra-operator \
--namespace k8ssandra-operator --create-namespace \
--version 1.22.0 \
--set global.clusterScoped=true &
wait
- name: Wait for operator pods
run: |
kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=300s &
kubectl wait --for=condition=Available deployment --all -n cnpg --timeout=300s &
kubectl wait --for=condition=Available deployment --all -n minio --timeout=300s &
kubectl wait --for=condition=Available deployment --all -n k8ssandra-operator --timeout=300s &
wait
- name: Install Storagebox
working-directory: applications/storagebox
run: |
helm install storagebox ./charts/storagebox \
--namespace storagebox --create-namespace \
-f tests/helm/all-components.yaml \
--wait --timeout 10m
- name: Wait for component pods
run: |
NS=storagebox
echo "Waiting for PostgreSQL pods..."
kubectl wait --for=condition=Ready pods -l cnpg.io/cluster=postgres -n $NS --timeout=180s || true
echo "Waiting for MinIO pods..."
kubectl wait --for=condition=Ready pods -l v1.min.io/tenant=minio -n $NS --timeout=180s || true
echo "Waiting for NFS pods..."
kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=nfs-server -n $NS --timeout=120s || true
echo "Waiting for rqlite pods..."
kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=rqlite -n $NS --timeout=120s || true
echo "Waiting for Cassandra pods..."
kubectl wait --for=condition=Ready pods -l app.kubernetes.io/managed-by=cass-operator -n $NS --timeout=300s || true
echo "All pods:"
kubectl get pods -n $NS
- name: Run smoke tests
working-directory: applications/storagebox
run: |
pip install -r tests/requirements.txt
python tests/smoke_test.py storagebox --timeout 120
- name: Debug output on failure
if: failure()
run: |
NS=storagebox
echo "=== Pods ==="
kubectl get pods -n $NS -o wide
echo ""
echo "=== Services ==="
kubectl get svc -n $NS
echo ""
echo "=== Events (last 50) ==="
kubectl get events -n $NS --sort-by='.lastTimestamp' | tail -50
echo ""
echo "=== Operator pods ==="
kubectl get pods -n cert-manager
kubectl get pods -n cnpg
kubectl get pods -n minio
kubectl get pods -n k8ssandra-operator
echo ""
echo "=== PostgreSQL Cluster status ==="
kubectl get clusters.postgresql.cnpg.io -n $NS -o yaml || true
echo ""
echo "=== K8ssandraCluster status ==="
kubectl get k8ssandraclusters -n $NS -o yaml || true
echo ""
echo "=== MinIO Tenant status ==="
kubectl get tenants.minio.min.io -n $NS -o yaml || true
echo ""
echo "=== Pod logs (last 30 lines each) ==="
for pod in $(kubectl get pods -n $NS -o name 2>/dev/null); do
echo "--- $pod ---"
kubectl logs $pod -n $NS --tail=30 2>/dev/null || true
done
- name: Remove Cluster
uses: replicatedhq/replicated-actions/remove-cluster@v1.17.0
if: ${{ always() && steps.create-cluster.outputs.cluster-id != '' }}
with:
api-token: ${{ secrets.REPLICATED_PLATFORM_EXAMPLES_TOKEN }}
cluster-id: ${{ steps.create-cluster.outputs.cluster-id }}