From 6f0d5a5546e2d20e81b4c395866987c55b8cceab Mon Sep 17 00:00:00 2001 From: Maros Orsak Date: Sun, 15 Sep 2024 20:50:46 +0200 Subject: [PATCH] [system test] [doc] connect package (#10567) Signed-off-by: see-quick Signed-off-by: Maros Orsak Co-authored-by: henryZrncik --- .azure/scripts/uncommitted-changes.sh | 4 +- ...mzi.systemtest.connect.ConnectBuilderST.md | 132 ++++++ ...io.strimzi.systemtest.connect.ConnectST.md | 411 ++++++++++++++++++ .../systemtests/labels/connect.md | 36 ++ .../systemtest/connect/ConnectBuilderST.java | 98 +++++ .../strimzi/systemtest/connect/ConnectST.java | 321 +++++++++++++- 6 files changed, 981 insertions(+), 21 deletions(-) create mode 100644 development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md create mode 100644 development-docs/systemtests/labels/connect.md diff --git a/.azure/scripts/uncommitted-changes.sh b/.azure/scripts/uncommitted-changes.sh index 37a072a3381..e072e89a33f 100755 --- a/.azure/scripts/uncommitted-changes.sh +++ b/.azure/scripts/uncommitted-changes.sh @@ -4,8 +4,8 @@ set -e echo "Build reason: ${BUILD_REASON}" echo "Source branch: ${BRANCH}" -CHANGED_DERIVED=$(git diff --name-status -- packaging/install/ packaging/helm-charts/ documentation/modules/appendix_crds.adoc cluster-operator/src/main/resources/cluster-roles development-docs/systemtests/) -GENERATED_FILES=$(git ls-files --other --exclude-standard -- packaging/install/ packaging/helm-charts/ cluster-operator/src/main/resources/cluster-roles api/src/test/resources/io/strimzi/api/kafka/model development-docs/systemtests/) +CHANGED_DERIVED=$(git diff --name-status -- packaging/install/ packaging/helm-charts/ documentation/modules/appendix_crds.adoc cluster-operator/src/main/resources/cluster-roles development-docs/systemtests/ ':!development-docs/systemtests/labels') +GENERATED_FILES=$(git ls-files --other --exclude-standard -- packaging/install/ packaging/helm-charts/ cluster-operator/src/main/resources/cluster-roles api/src/test/resources/io/strimzi/api/kafka/model development-docs/systemtests/ ':!development-docs/systemtests/labels') if [ -n "$CHANGED_DERIVED" ] || [ -n "$GENERATED_FILES" ] ; then if [ -n "$CHANGED_DERIVED" ] ; then echo "ERROR: Uncommitted changes in derived resources:" diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md new file mode 100644 index 00000000000..0e55406207b --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md @@ -0,0 +1,132 @@ +# ConnectBuilderST + +**Description:** Testing Kafka Connect build and plugin management. + +**Labels:** + +* [connect](labels/connect.md) + +
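The build-focused cases below all revolve around the `spec.build` section of the `KafkaConnect` custom resource. For orientation, here is a minimal sketch of such a resource assembled with the sundrio-generated builders from the Strimzi `api` module; the names, registry, URL, and checksum are placeholders, and builder package paths differ between Strimzi versions:

```java
import io.strimzi.api.kafka.model.connect.KafkaConnect;
import io.strimzi.api.kafka.model.connect.KafkaConnectBuilder;
import io.strimzi.api.kafka.model.connect.build.JarArtifactBuilder;
import io.strimzi.api.kafka.model.connect.build.PluginBuilder;

public class ConnectBuildSketch {
    public static KafkaConnect connectWithBuild() {
        return new KafkaConnectBuilder()
            .withNewMetadata()
                .withName("my-connect")            // placeholder cluster name
                .withNamespace("my-namespace")     // placeholder namespace
            .endMetadata()
            .withNewSpec()
                .withReplicas(1)
                .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
                .withNewBuild()
                    // Registry the freshly built image is pushed to; on OpenShift an
                    // ImageStream output can be used instead (see testPushIntoImageStream).
                    .withNewDockerOutput()
                        .withImage("registry.example.com/org/my-connect:latest")
                    .endDockerOutput()
                    .withPlugins(new PluginBuilder()
                        .withName("echo-sink")
                        .withArtifacts(new JarArtifactBuilder()
                            .withUrl("https://example.com/echo-sink.jar")  // placeholder URL
                            // A mismatched sha512sum makes the build fail, which is the
                            // failure mode testBuildFailsWithWrongChecksumOfArtifact provokes.
                            .withSha512sum("<sha512-of-the-artifact>")
                            .build())
                        .build())
                .endBuild()
            .endSpec()
            .build();
    }
}
```

With a deliberately mismatched `sha512sum`, the operator marks the build as failed; the first test below provokes exactly that failure and then repairs it.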
## testBuildFailsWithWrongChecksumOfArtifact

**Description:** Test that ensures the Kafka Connect build fails with a wrong artifact checksum and recovers with the correct checksum.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Initialize TestStorage and get test image name | TestStorage instance is created and the image name for the test case is retrieved |
| 2. | Create a plugin with a wrong checksum and build the Kafka Connect resource with it | Kafka Connect resource is created but the build fails due to the wrong checksum |
| 3. | Deploy Scraper pod with specific configurations | Kafka Scraper pod is successfully deployed |
| 4. | Wait for Kafka Connect status to indicate build failure | Kafka Connect status contains message about build failure |
| 5. | Deploy network policies for Kafka Connect | Network policies are successfully deployed for Kafka Connect |
| 6. | Replace the plugin checksum with the correct one and update Kafka Connect resource | Kafka Connect resource is updated with the correct checksum |
| 7. | Wait for Kafka Connect to be ready | Kafka Connect becomes ready |
| 8. | Verify that EchoSink KafkaConnector is available in Kafka Connect API | EchoSink KafkaConnector is returned by Kafka Connect API |
| 9. | Verify that EchoSink KafkaConnector is listed in Kafka Connect resource status | EchoSink KafkaConnector is listed in the status of Kafka Connect resource |

**Labels:**

* [connect](labels/connect.md)


## testBuildOtherPluginTypeWithAndWithoutFileName

**Description:** Test verifying Kafka Connect plugin behavior with and without file names for different plugin types.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Initialize test storage and topic | Namespace and topic are created successfully |
| 2. | Create and set up Kafka Connect with specified plugin and build configurations | Kafka Connect is deployed and configured correctly |
| 3. | Take a snapshot of current Kafka Connect pods and verify plugin file name | Plugin file name matches the expected file name |
| 4. | Modify Kafka Connect to use a plugin without a file name and trigger a rolling update | Kafka Connect plugin is updated without the file name successfully |
| 5. | Verify plugin file name after update using the plugin's hash | Plugin file name is different from the previous name and matches the hash |

**Labels:**

* [connect](labels/connect.md)


## testBuildPluginUsingMavenCoordinatesArtifacts

**Description:** Test building a plugin using Maven coordinates artifacts.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create a test storage object | Test storage object is created |
| 2. | Generate image name for the test case | Image name is generated successfully |
| 3. | Create Kafka topic and Kafka Connect resources with the configuration for plugin using mvn coordinates | Resources are created and available |
| 4. | Configure Kafka Connector and deploy it | Connector is deployed with correct configuration |
| 5. | Create Kafka consumer and start consuming messages | Consumer starts consuming messages successfully |
| 6. | Verify that consumer receives messages | Consumer receives the expected messages |

**Labels:**

* [connect](labels/connect.md)


## testBuildWithJarTgzAndZip

**Description:** Test for building Kafka Connect image with combined jar, tar.gz, and zip plugins, and validating message send-receive functionality.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. 
| Create TestStorage object | TestStorage instance is created with context | +| 2. | Get image name for test case | Image name is successfully retrieved | +| 3. | Create Kafka Topic resources | Kafka Topic resources are created with wait | +| 4. | Create Kafka Connect resources | Kafka Connect resources are created with wait | +| 5. | Configure Kafka Connector | Kafka Connector is configured and created with wait | +| 6. | Verify Kafka Connector class name | Connector class name matches expected ECHO_SINK_CLASS_NAME | +| 7. | Create Kafka Clients and send messages | Kafka Clients created and messages sent and verified | +| 8. | Check logs for received message | Logs contain the expected received message | + +**Labels:** + +* [connect](labels/connect.md) + + +## testPushIntoImageStream + +**Description:** Test verifying the successful push of a KafkaConnect build into an OpenShift ImageStream. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Initialize test storage | Test storage is initialized with the test context | +| 2. | Create ImageStream | ImageStream is created in the specified namespace | +| 3. | Deploy KafkaConnect with the image stream output | KafkaConnect is deployed with the expected build configuration | +| 4. | Verify KafkaConnect build artifacts and status | KafkaConnect has two plugins, uses the image stream output and is in the 'Ready' state | + +**Labels:** + +* [connect](labels/connect.md) + + +## testUpdateConnectWithAnotherPlugin + +**Description:** Test updating and validating Kafka Connect with another plugin. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create TestStorage instance | Instance of TestStorage is created | +| 2. | Generate random topic name and create Kafka topic | Kafka topic is successfully created | +| 3. | Deploy network policies for KafkaConnect | Network policies are successfully deployed | +| 4. | Create EchoSink KafkaConnector | EchoSink KafkaConnector is successfully created and validated | +| 5. | Add a second plugin to Kafka Connect and perform rolling update | Second plugin is added and rolling update is performed | +| 6. | Create Camel-HTTP-Sink KafkaConnector | Camel-HTTP-Sink KafkaConnector is successfully created and validated | +| 7. | Verify that both connectors and plugins are present in Kafka Connect | Both connectors and plugins are verified successfully | + +**Labels:** + +* [connect](labels/connect.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md new file mode 100644 index 00000000000..c4794cbf929 --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md @@ -0,0 +1,411 @@ +# ConnectST + +**Description:** Verifies the deployment, manual rolling update, and undeployment of Kafka Connect components. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy scraper Pod for accessing all other Pods | Scraper Pod is deployed | + +**Labels:** + +* [connect](labels/connect.md) + +
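Most cases in this suite share one deployment pattern: a `KafkaConnect` cluster annotated so that the Cluster Operator manages connectors declaratively through `KafkaConnector` resources, usually with Kafka's `FileStreamSinkConnector` as the workload. A minimal sketch of that pairing, assuming the Strimzi `api`-module builders (cluster, topic, and file names are placeholders):

```java
import java.util.Map;
import io.strimzi.api.kafka.model.connect.KafkaConnectBuilder;
import io.strimzi.api.kafka.model.connector.KafkaConnectorBuilder;

public class ConnectSketch {
    public static void main(String[] args) {
        var connect = new KafkaConnectBuilder()
            .withNewMetadata()
                .withName("my-connect")
                // Tells the Cluster Operator to manage connectors declaratively
                // via KafkaConnector resources instead of the Connect REST API.
                .addToAnnotations("strimzi.io/use-connector-resources", "true")
            .endMetadata()
            .withNewSpec()
                .withReplicas(1)
                .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
            .endSpec()
            .build();

        var connector = new KafkaConnectorBuilder()
            .withNewMetadata()
                .withName("file-sink")
                // Binds the connector to its Connect cluster.
                .addToLabels("strimzi.io/cluster", "my-connect")
            .endMetadata()
            .withNewSpec()
                .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
                .withTasksMax(1)
                .withConfig(Map.of(
                    "topics", "my-topic",
                    "file", "/tmp/test-file-sink.txt"))  // placeholder sink path
            .endSpec()
            .build();
    }
}
```

The `strimzi.io/cluster` label on the connector is what ties it to its Connect cluster; without it the operator does not pick the resource up.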
## testConnectScramShaAuthWithWeirdUserName

**Description:** Test verifying that Kafka Connect can authenticate with SCRAM-SHA-512 using a username with special characters and length exceeding typical constraints.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create resource with Node Pools | Node Pools created successfully |
| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 3. | Deploy Kafka cluster with SCRAM-SHA-512 authentication | Kafka cluster deployed with specified authentications |
| 4. | Create Kafka Topic | Topic created successfully |
| 5. | Create Kafka SCRAM-SHA-512 user with a weird username | User created successfully with SCRAM-SHA-512 credentials |
| 6. | Deploy Kafka Connect with SCRAM-SHA-512 authentication | Kafka Connect instance deployed and configured with user credentials |
| 7. | Deploy Kafka Connector | Kafka Connector deployed and configured successfully |
| 8. | Send messages using the configured client | Messages sent successfully |
| 9. | Verify that connector receives messages | Messages consumed by the connector and written to the specified sink |

**Labels:**

* [connect](labels/connect.md)


## testConnectTlsAuthWithWeirdUserName

**Description:** Test verifying Kafka Connect TLS authentication with a username containing unusual characters.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Set up a username containing dots and consisting of 64 characters | Username is set |
| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 3. | Create Kafka broker, controller, topic, and Kafka user with the specified username | Resources are created with the expected configurations |
| 4. | Setup Kafka Connect with the created Kafka instance and TLS authentication | Kafka Connect is set up with the expected configurations |
| 5. | Check if the user can produce messages to Kafka | Messages are produced successfully |
| 6. | Verify that Kafka Connect can consume messages | Messages are consumed successfully by Kafka Connect |

**Labels:**

* [connect](labels/connect.md)


## testConnectorTaskAutoRestart

**Description:** Test the automatic restart functionality of Kafka Connect tasks when they fail.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create test storage instance | Test storage instance is created |
| 2. | Create node pool resources | Node pool resources are created and waited for readiness |
| 3. | Create Kafka cluster | Kafka cluster is created and waited for readiness |
| 4. | Deploy EchoSink Kafka Connector with auto-restart enabled | Kafka Connector is created with auto-restart enabled |
| 5. | Send first batch of messages | First batch of messages is sent to the topic |
| 6. | Ensure connection success for the first batch | Successfully produce the first batch of messages |
| 7. | Send second batch of messages | Second batch of messages is sent to the topic |
| 8. | Ensure connection success for the second batch | Successfully produce the second batch of messages |
| 9. | Verify task failure and auto-restart | Connector task fails and is automatically restarted |
| 10. | Wait for task to reach running state | Connector task returns to running state after recovery |
| 11. | Verify auto-restart count reset | Auto-restart count is reset to zero after task stability |

**Labels:**

* [connect](labels/connect.md)
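The auto-restart behaviour verified above is controlled by `spec.autoRestart` on the `KafkaConnector` resource. A minimal sketch, assuming the `KafkaConnectorBuilder` from the Strimzi `api` module; the connector class name and the failure-injection option are illustrative stand-ins rather than values taken from the test code:

```java
import io.strimzi.api.kafka.model.connector.KafkaConnector;
import io.strimzi.api.kafka.model.connector.KafkaConnectorBuilder;

public class AutoRestartSketch {
    public static KafkaConnector echoSinkWithAutoRestart() {
        return new KafkaConnectorBuilder()
            .withNewMetadata()
                .withName("echo-sink")
                .addToLabels("strimzi.io/cluster", "my-connect")  // placeholder cluster name
            .endMetadata()
            .withNewSpec()
                // Illustrative class name for an EchoSink-style plugin.
                .withClassName("cz.scholz.kafka.connect.echosink.EchoSinkConnector")
                .withTasksMax(1)
                // Hypothetical plugin option that makes a task fail on purpose,
                // so the operator's restart logic has something to recover from.
                .addToConfig("fail.task.after.records", 2)
                // The operator restarts failed tasks with an increasing back-off and
                // resets the restart counter once the task stays in a Running state.
                .withNewAutoRestart()
                    .withEnabled(true)
                .endAutoRestart()
            .endSpec()
            .build();
    }
}
```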
## testCustomAndUpdatedValues

**Description:** Test that verifies custom and updated environment variables and readiness/liveness probes for Kafka Connect.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create and configure Kafka Connect with initial values | Kafka Connect is created and configured with initial environment variables and readiness/liveness probes |
| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 3. | Verify initial configuration and environment variables | Initial configuration and environment variables are as expected |
| 4. | Update Kafka Connect configuration and environment variables | Kafka Connect configuration and environment variables are updated |
| 5. | Verify updated configuration and environment variables | Updated configuration and environment variables are as expected |

**Labels:**

* [connect](labels/connect.md)


## testDeployRollUndeploy

**Description:** Verifies the deployment, manual rolling update, and undeployment of Kafka Connect components.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Initialize Test Storage | Test storage instance is created with required context |
| 2. | Define expected configurations | Configurations are loaded from properties file |
| 3. | Create and wait for resources | Kafka resources, including NodePools and KafkaConnect instances, are created and become ready |
| 4. | Annotate for manual rolling update | KafkaConnect components are annotated for a manual rolling update |
| 5. | Perform and wait for rolling update | KafkaConnect components roll and new pods are deployed |
| 6. | Verify Kafka Connect pods | Pod configurations and annotations are verified |
| 7. | Verify Kafka Connect resources | Various Kafka Connect resource labels and configurations are verified to ensure correct deployment |

**Labels:**

* [connect](labels/connect.md)


## testJvmAndResources

**Description:** Test ensuring the JVM options and resource requests/limits are correctly applied to Kafka Connect components.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create TestStorage instance | TestStorage instance is created |
| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 3. | Create broker and controller node pools | Node pools are created and ready |
| 4. | Create Kafka cluster | Kafka cluster is created and operational |
| 5. | Setup JVM options and resource requirements for Kafka Connect | Kafka Connect is configured with specified JVM options and resources |
| 6. | Verify JVM options and resource requirements | JVM options and resource requests/limits are correctly applied to the Kafka Connect pod |

**Labels:**

* [connect](labels/connect.md)


## testKafkaConnectAndConnectorFileSinkPlugin

**Description:** Test the functionality of Kafka Connect with a File Sink Plugin in a parallel namespace setup.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create and configure test storage | Test storage is set up with necessary configurations. |
| 2. 
| Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 3. | Create and wait for the broker and controller pools | Broker and controller pools are created and running. |
| 4. | Deploy and configure Kafka Connect with File Sink Plugin | Kafka Connect with File Sink Plugin is deployed and configured. |
| 5. | Deploy Network Policies for Kafka Connect | Network Policies are successfully deployed for Kafka Connect. |
| 6. | Create and wait for Kafka Connector | Kafka Connector is created and running. |
| 7. | Deploy and configure scraper pod | Scraper pod is deployed and configured. |
| 8. | Deploy and configure Kafka clients | Kafka clients are deployed and configured. |
| 9. | Execute assertions to verify the Kafka Connector configuration and status | Assertions confirm the Kafka Connector is successfully deployed, has the correct configuration, and is running. |

**Labels:**

* [connect](labels/connect.md)


## testKafkaConnectAndConnectorStateWithFileSinkPlugin

**Description:** This test case verifies pausing, stopping and running of connector via 'spec.pause' or 'spec.state' specification.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Deploy prerequisites for running FileSink KafkaConnector, that is KafkaTopic, Kafka cluster, KafkaConnect, and FileSink KafkaConnector. | All resources are deployed and ready. |
| 2. | Pause and run connector by modifying 'spec.pause' property of Connector, while also producing messages when connector pauses. | Connector is paused and resumed as expected, after connector is resumed, produced messages are present in destination file, indicating connector resumed correctly. |
| 3. | Stop and run connector by modifying 'spec.state' property of Connector (with priority over now set 'spec.pause=false'), while also producing messages when connector stops. | Connector stops and resumes as expected, after resuming, produced messages are present in destination file, indicating connector resumed correctly. |
| 4. | Pause and run connector by modifying 'spec.state' property of Connector (with priority over now set 'spec.pause=false'), while also producing messages when connector pauses. | Connector pauses and resumes as expected, after resuming, produced messages are present in destination file, indicating connector resumed correctly. |

**Labels:**

* [connect](labels/connect.md)
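Declaratively, the pause/stop/run transitions exercised above map onto two fields of the `KafkaConnector` spec: the older boolean `spec.pause` and the newer `spec.state` enum, which takes priority when both are set. A minimal sketch of the stop transition, assuming the `ConnectorState` type (imported by this patch in `ConnectST.java`) and the usual sundrio builder conventions:

```java
import io.strimzi.api.kafka.model.common.ConnectorState;
import io.strimzi.api.kafka.model.connector.KafkaConnector;
import io.strimzi.api.kafka.model.connector.KafkaConnectorBuilder;

public class ConnectorStateSketch {
    // Stops a running connector declaratively. Because spec.state wins over the
    // legacy spec.pause flag, the connector stops even with pause left at false.
    public static KafkaConnector stop(KafkaConnector connector) {
        return new KafkaConnectorBuilder(connector)
            .editSpec()
                .withPause(false)                   // legacy flag, overridden by state
                .withState(ConnectorState.STOPPED)  // RUNNING, PAUSED or STOPPED
            .endSpec()
            .build();
    }
}
```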
## testKafkaConnectScaleUpScaleDown

**Description:** Test verifying the scaling up and down functionality of Kafka Connect in a Kubernetes environment.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create TestStorage object instance | Instance of TestStorage is created |
| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 3. | Create resources for KafkaNodePools and KafkaCluster | Resources are created and ready |
| 4. | Deploy Kafka Connect with file plugin | Kafka Connect is deployed with 1 initial replica |
| 5. | Verify the initial replica count | Initial replica count is verified to be 1 |
| 6. | Scale Kafka Connect up to a higher number of replicas | Kafka Connect is scaled up successfully |
| 7. | Verify the new replica count after scaling up | New replica count is verified to be the scaled up count |
| 8. | Scale Kafka Connect down to the initial number of replicas | Kafka Connect is scaled down successfully |
| 9. | Verify the replica count after scaling down | Replica count is verified to be the initial count |

**Labels:**

* [connect](labels/connect.md)


## testKafkaConnectWithPlainAndScramShaAuthentication

**Description:** Test verifying Kafka Connect functionalities with Plain and SCRAM-SHA authentication.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create object instance of TestStorage | Instance of TestStorage is created |
| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 3. | Deploy Kafka with SCRAM-SHA-512 listener | Kafka is deployed with the specified listener authentication |
| 4. | Create KafkaUser with SCRAM-SHA authentication | KafkaUser is created using SCRAM-SHA authentication with the given credentials |
| 5. | Create KafkaTopic | KafkaTopic is created |
| 6. | Deploy KafkaConnect with SCRAM-SHA-512 authentication | KafkaConnect instance is deployed and connected to Kafka |
| 7. | Deploy required resources for NetworkPolicy, KafkaConnect, and ScraperPod | Resources are successfully deployed with NetworkPolicy applied |
| 8. | Create FileStreamSink connector | FileStreamSink connector is created successfully |
| 9. | Create Kafka client with SCRAM-SHA-PLAIN authentication and send messages | Messages are produced and consumed successfully using Kafka client with SCRAM-SHA-PLAIN authentication |
| 10. | Verify messages in KafkaConnect file sink | FileSink contains the expected number of messages |

**Labels:**

* [connect](labels/connect.md)


## testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged

**Description:** Verifies Kafka Connect functionality when SCRAM-SHA authentication password is changed and the component is rolled.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 2. | Create Kafka cluster with SCRAM-SHA authentication | Kafka cluster is created with SCRAM-SHA authentication enabled |
| 3. | Create a Kafka user with SCRAM-SHA authentication | Kafka user with SCRAM-SHA authentication is created |
| 4. | Deploy Kafka Connect with the created user credentials | Kafka Connect is deployed successfully |
| 5. | Update the SCRAM-SHA user password and reconfigure Kafka Connect | Kafka Connect is reconfigured with the new password |
| 6. | Verify Kafka Connect continues to function after rolling update | Kafka Connect remains functional and REST API is available |

**Labels:**

* [connect](labels/connect.md)


## testMountingSecretAndConfigMapAsVolumesAndEnvVars

**Description:** This test verifies that Secrets and ConfigMaps can be mounted as volumes and environment variables in Kafka Connect.

**Steps:**

| Step | Action | Result |
| - | - | - |
| 1. | Create Secrets and ConfigMaps | Secrets and ConfigMaps are created successfully. |
| 2. | Create Kafka environment | Kafka broker, Kafka Connect, and other resources are deployed successfully. |
| 3. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration |
| 4. 
| Bind Secrets and ConfigMaps to Kafka Connect | Secrets and ConfigMaps are bound to Kafka Connect as volumes and environment variables. | +| 5. | Verify environment variables | Kafka Connect environment variables contain expected values from Secrets and ConfigMaps. | +| 6. | Verify mounted volumes | Kafka Connect mounted volumes contain expected values from Secrets and ConfigMaps. | + +**Labels:** + +* [connect](labels/connect.md) + + +## testMultiNodeKafkaConnectWithConnectorCreation + +**Description:** Test validating multi-node Kafka Connect cluster creation, connector deployment, and message processing. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Initialize test storage and determine connect cluster name | Test storage and cluster name properly initialized | +| 2. | Create broker and controller node pools | Broker and controller node pools created successfully | +| 3. | Deploy Kafka cluster in ephemeral mode | Kafka cluster deployed successfully | +| 4. | Create Kafka Connect cluster with default image | Kafka Connect cluster created with appropriate configuration | +| 5. | Create and configure Kafka Connector | Kafka Connector deployed and configured with correct settings | +| 6. | Verify the status of the Kafka Connector | Kafka Connector status retrieved and worker node identified | +| 7. | Deploy Kafka clients for producer and consumer | Kafka producer and consumer clients deployed | +| 8. | Verify that Kafka Connect writes messages to the specified file sink | Messages successfully written to the file sink by Kafka Connect | + +**Labels:** + +* [connect](labels/connect.md) + + +## testScaleConnectAndConnectorSubresource + +**Description:** This test verifies the scaling functionality of Kafka Connect and Kafka Connector subresources. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Initialize the test storage and create broker and controller pools | Broker and controller pools are created successfully | +| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 3. | Deploy Kafka, Kafka Connect and Kafka Connector resources | Kafka, Kafka Connect and Kafka Connector resources are deployed successfully | +| 4. | Scale Kafka Connect subresource | Kafka Connect subresource is scaled successfully | +| 5. | Verify Kafka Connect subresource scaling | Kafka Connect replicas and observed generation are as expected | +| 6. | Scale Kafka Connector subresource | Kafka Connector subresource task max is set correctly | +| 7. | Verify Kafka Connector subresource scaling | Kafka Connector task max in spec, status and Connect Pods API are as expected | + +**Labels:** + +* [connect](labels/connect.md) + + +## testScaleConnectWithConnectorToZero + +**Description:** Test scaling Kafka Connect with a connector to zero replicas. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create TestStorage instance | TestStorage instance is created with context | +| 2. | Create broker and controller node pools | Broker and Controller node pools are created | +| 3. | Create ephemeral Kafka cluster | Kafka cluster with 3 replicas is created | +| 4. | Create Kafka Connect with file plugin | Kafka Connect is created with 2 replicas and file plugin | +| 5. | Create Kafka Connector | Kafka Connector is created with necessary configurations | +| 6. | Check Kafka Connect pods | There are 2 Kafka Connect pods | +| 7. 
| Scale down Kafka Connect to zero | Kafka Connect is scaled down to 0 replicas | +| 8. | Wait for Kafka Connect to be ready | Kafka Connect readiness is verified | +| 9. | Wait for Kafka Connector to not be ready | Kafka Connector readiness is verified | +| 10. | Verify conditions | Pod size is 0, Kafka Connect is ready, Kafka Connector is not ready due to zero replicas | + +**Labels:** + +* [connect](labels/connect.md) + + +## testScaleConnectWithoutConnectorToZero + +**Description:** Test to validate scaling KafkaConnect without a connector to zero replicas. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Initialize TestStorage and create namespace | Namespace and storage initialized | +| 2. | Create broker and controller node pools | Node pools created with 3 replicas. | +| 3. | Create ephemeral Kafka cluster | Kafka cluster created with 3 replicas. | +| 4. | Create KafkaConnect resource with 2 replicas | KafkaConnect resource created with 2 replicas. | +| 5. | Verify that KafkaConnect has 2 pods | 2 KafkaConnect pods are running. | +| 6. | Scale down KafkaConnect to zero replicas | KafkaConnect scaled to zero replicas. | +| 7. | Wait for KafkaConnect to be ready | KafkaConnect is ready with 0 replicas. | +| 8. | Verify that KafkaConnect has 0 pods | No KafkaConnect pods are running and status is ready. | + +**Labels:** + +* [connect](labels/connect.md) + + +## testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication + +**Description:** Test validating Kafka Connect with TLS and SCRAM-SHA authentication along with associated resources setup and verification. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Initialize test storage | Instances created successfully | +| 2. | Create Kafka node pools (broker and controller) | Node pools created and ready | +| 3. | Deploy Kafka cluster with TLS and SCRAM-SHA-512 authentication | Kafka cluster deployed with listeners configured | +| 4. | Create Kafka user with SCRAM-SHA-512 | User created successfully | +| 5. | Deploy Kafka topic | Topic created successfully | +| 6. | Deploy Kafka Connect with TLS and SCRAM-SHA-512 authentication | Kafka Connect deployed with plugins and configuration | +| 7. | Deploy scraper pod for testing Kafka Connect | Scraper pod deployed successfully | +| 8. | Deploy NetworkPolicies for Kafka Connect | NetworkPolicies applied successfully | +| 9. | Create and configure FileStreamSink KafkaConnector | FileStreamSink KafkaConnector created and configured | +| 10. | Create Kafka clients for SCRAM-SHA-512 over TLS | Kafka clients (producer and consumer) created successfully | +| 11. | Wait for client operations to succeed | Message production and consumption verified | +| 12. | Verify messages in Kafka Connect file sink | Messages found in the specified file path | + +**Labels:** + +* [connect](labels/connect.md) + + +## testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication + +**Description:** This test verifies that Kafka Connect works with TLS and TLS client authentication. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create test storage instance | Test storage instance is created | +| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 3. | Create resources for Kafka broker and Kafka Connect components | Resources are created and ready | +| 4. 
| Configure Kafka broker with TLS listener and client authentication | Kafka broker is configured correctly | +| 5. | Deploy Kafka user with TLS authentication | Kafka user is deployed with TLS authentication | +| 6. | Deploy Kafka topic | Kafka topic is deployed | +| 7. | Configure and deploy Kafka Connect with TLS and TLS client authentication | Kafka Connect is configured and deployed correctly | +| 8. | Deploy Network Policies for Kafka Connect | Network Policies are deployed | +| 9. | Create FileStreamSink KafkaConnector via scraper pod | KafkaConnector is created correctly | +| 10. | Deploy TLS clients and produce/consume messages | Messages are produced and consumed successfully | +| 11. | Verify messages in Kafka Connect FileSink | Messages are verified in Kafka Connect FileSink | + +**Labels:** + +* [connect](labels/connect.md) + diff --git a/development-docs/systemtests/labels/connect.md b/development-docs/systemtests/labels/connect.md new file mode 100644 index 00000000000..1713849294c --- /dev/null +++ b/development-docs/systemtests/labels/connect.md @@ -0,0 +1,36 @@ +# **Connect** + +## Description + +These tests validate the Kafka Connect component, ensuring reliable integration between Kafka and external systems through connectors. +Kafka Connect enables data streaming between Kafka clusters and various data sources or sinks. +These tests cover scenarios like plugin management, build processes, network configurations, and various security protocols. +Ensuring the correctness of Kafka Connect behavior under different configurations and scaling scenarios is critical to +maintaining data consistency and availability in a streaming ecosystem. + + +**Tests:** +- [testBuildFailsWithWrongChecksumOfArtifact](../io.strimzi.systemtest.connect.ConnectBuilderST.md) +- [testKafkaConnectAndConnectorStateWithFileSinkPlugin](../io.strimzi.systemtest.connect.ConnectST.md) +- [testScaleConnectWithoutConnectorToZero](../io.strimzi.systemtest.connect.ConnectST.md) +- [testScaleConnectWithConnectorToZero](../io.strimzi.systemtest.connect.ConnectST.md) +- [testBuildWithJarTgzAndZip](../io.strimzi.systemtest.connect.ConnectBuilderST.md) +- [testKafkaConnectAndConnectorFileSinkPlugin](../io.strimzi.systemtest.connect.ConnectST.md) +- [testKafkaConnectWithPlainAndScramShaAuthentication](../io.strimzi.systemtest.connect.ConnectST.md) +- [testKafkaConnectScaleUpScaleDown](../io.strimzi.systemtest.connect.ConnectST.md) +- [testDeployRollUndeploy](../io.strimzi.systemtest.connect.ConnectST.md) +- [testCustomAndUpdatedValues](../io.strimzi.systemtest.connect.ConnectST.md) +- [testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication](../io.strimzi.systemtest.connect.ConnectST.md) +- [testScaleConnectAndConnectorSubresource](../io.strimzi.systemtest.connect.ConnectST.md) +- [testMountingSecretAndConfigMapAsVolumesAndEnvVars](../io.strimzi.systemtest.connect.ConnectST.md) +- [testMultiNodeKafkaConnectWithConnectorCreation](../io.strimzi.systemtest.connect.ConnectST.md) +- [testConnectorTaskAutoRestart](../io.strimzi.systemtest.connect.ConnectST.md) +- [testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged](../io.strimzi.systemtest.connect.ConnectST.md) +- [testPushIntoImageStream](../io.strimzi.systemtest.connect.ConnectBuilderST.md) +- [testJvmAndResources](../io.strimzi.systemtest.connect.ConnectST.md) +- [testConnectTlsAuthWithWeirdUserName](../io.strimzi.systemtest.connect.ConnectST.md) +- 
[testBuildOtherPluginTypeWithAndWithoutFileName](../io.strimzi.systemtest.connect.ConnectBuilderST.md)
- [testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication](../io.strimzi.systemtest.connect.ConnectST.md)
- [testConnectScramShaAuthWithWeirdUserName](../io.strimzi.systemtest.connect.ConnectST.md)
- [testBuildPluginUsingMavenCoordinatesArtifacts](../io.strimzi.systemtest.connect.ConnectBuilderST.md)
- [testUpdateConnectWithAnotherPlugin](../io.strimzi.systemtest.connect.ConnectBuilderST.md)
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java
index 1b7b57fa6e5..3f8db73186f 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java
@@ -7,6 +7,11 @@
 import io.fabric8.openshift.api.model.ImageStream;
 import io.fabric8.openshift.api.model.ImageStreamBuilder;
 import io.fabric8.openshift.client.OpenShiftClient;
+import io.skodjob.annotations.Desc;
+import io.skodjob.annotations.Label;
+import io.skodjob.annotations.Step;
+import io.skodjob.annotations.SuiteDoc;
+import io.skodjob.annotations.TestDoc;
 import io.strimzi.api.kafka.model.common.Condition;
 import io.strimzi.api.kafka.model.connect.KafkaConnect;
 import io.strimzi.api.kafka.model.connect.KafkaConnectResources;
@@ -77,6 +82,12 @@
 @Tag(CONNECT_COMPONENTS)
 @Tag(CONNECT)
 @MicroShiftNotSupported
+@SuiteDoc(
+    description = @Desc("Testing Kafka Connect build and plugin management."),
+    labels = {
+        @Label(value = "connect"),
+    }
+)
 class ConnectBuilderST extends AbstractST {

     private static final Logger LOGGER = LogManager.getLogger(ConnectBuilderST.class);
@@ -143,6 +154,23 @@ class ConnectBuilderST extends AbstractST {
         .build();

     @ParallelTest
+    @TestDoc(
+        description = @Desc("Test that ensures the Kafka Connect build fails with a wrong artifact checksum and recovers with the correct checksum."),
+        steps = {
+            @Step(value = "Initialize TestStorage and get test image name", expected = "TestStorage instance is created and the image name for the test case is retrieved"),
+            @Step(value = "Create a plugin with a wrong checksum and build the Kafka Connect resource with it", expected = "Kafka Connect resource is created but the build fails due to the wrong checksum"),
+            @Step(value = "Deploy Scraper pod with specific configurations", expected = "Kafka Scraper pod is successfully deployed"),
+            @Step(value = "Wait for Kafka Connect status to indicate build failure", expected = "Kafka Connect status contains message about build failure"),
+            @Step(value = "Deploy network policies for Kafka Connect", expected = "Network policies are successfully deployed for Kafka Connect"),
+            @Step(value = "Replace the plugin checksum with the correct one and update Kafka Connect resource", expected = "Kafka Connect resource is updated with the correct checksum"),
+            @Step(value = "Wait for Kafka Connect to be ready", expected = "Kafka Connect becomes ready"),
+            @Step(value = "Verify that EchoSink KafkaConnector is available in Kafka Connect API", expected = "EchoSink KafkaConnector is returned by Kafka Connect API"),
+            @Step(value = "Verify that EchoSink KafkaConnector is listed in Kafka Connect resource status", expected = "EchoSink KafkaConnector is listed in the status of Kafka Connect resource")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testBuildFailsWithWrongChecksumOfArtifact() {
         final TestStorage testStorage = new 
TestStorage(ResourceManager.getTestContext()); @@ -213,6 +241,22 @@ void testBuildFailsWithWrongChecksumOfArtifact() { } @ParallelTest + @TestDoc( + description = @Desc("Test for building Kafka Connect image with combined jar, tar.gz, and zip plugins, and validating message send-receive functionality."), + steps = { + @Step(value = "Create TestStorage object", expected = "TestStorage instance is created with context"), + @Step(value = "Get image name for test case", expected = "Image name is successfully retrieved"), + @Step(value = "Create Kafka Topic resources", expected = "Kafka Topic resources are created with wait"), + @Step(value = "Create Kafka Connect resources", expected = "Kafka Connect resources are created with wait"), + @Step(value = "Configure Kafka Connector", expected = "Kafka Connector is configured and created with wait"), + @Step(value = "Verify Kafka Connector class name", expected = "Connector class name matches expected ECHO_SINK_CLASS_NAME"), + @Step(value = "Create Kafka Clients and send messages", expected = "Kafka Clients created and messages sent and verified"), + @Step(value = "Check logs for received message", expected = "Logs contain the expected received message") + }, + labels = { + @Label(value = "connect") + } + ) void testBuildWithJarTgzAndZip() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -264,6 +308,18 @@ void testBuildWithJarTgzAndZip() { @OpenShiftOnly @ParallelTest + @TestDoc( + description = @Desc("Test verifying the successful push of a KafkaConnect build into an OpenShift ImageStream."), + steps = { + @Step(value = "Initialize test storage", expected = "Test storage is initialized with the test context"), + @Step(value = "Create ImageStream", expected = "ImageStream is created in the specified namespace"), + @Step(value = "Deploy KafkaConnect with the image stream output", expected = "KafkaConnect is deployed with the expected build configuration"), + @Step(value = "Verify KafkaConnect build artifacts and status", expected = "KafkaConnect has two plugins, uses the image stream output and is in the 'Ready' state") + }, + labels = { + @Label(value = "connect") + } + ) void testPushIntoImageStream() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -304,6 +360,21 @@ void testPushIntoImageStream() { } @ParallelTest + @TestDoc( + description = @Desc("Test updating and validating Kafka Connect with another plugin."), + steps = { + @Step(value = "Create TestStorage instance", expected = "Instance of TestStorage is created"), + @Step(value = "Generate random topic name and create Kafka topic", expected = "Kafka topic is successfully created"), + @Step(value = "Deploy network policies for KafkaConnect", expected = "Network policies are successfully deployed"), + @Step(value = "Create EchoSink KafkaConnector", expected = "EchoSink KafkaConnector is successfully created and validated"), + @Step(value = "Add a second plugin to Kafka Connect and perform rolling update", expected = "Second plugin is added and rolling update is performed"), + @Step(value = "Create Camel-HTTP-Sink KafkaConnector", expected = "Camel-HTTP-Sink KafkaConnector is successfully created and validated"), + @Step(value = "Verify that both connectors and plugins are present in Kafka Connect", expected = "Both connectors and plugins are verified successfully") + }, + labels = { + @Label(value = "connect") + } + ) void testUpdateConnectWithAnotherPlugin() { final TestStorage testStorage = new 
TestStorage(ResourceManager.getTestContext()); @@ -398,6 +469,19 @@ void testUpdateConnectWithAnotherPlugin() { } @ParallelTest + @TestDoc( + description = @Desc("Test verifying Kafka Connect plugin behavior with and without file names for different plugin types."), + steps = { + @Step(value = "Initialize test storage and topic", expected = "Namespace and topic are created successfully"), + @Step(value = "Create and set up Kafka Connect with specified plugin and build configurations", expected = "Kafka Connect is deployed and configured correctly"), + @Step(value = "Take a snapshot of current Kafka Connect pods and verify plugin file name", expected = "Plugin file name matches the expected file name"), + @Step(value = "Modify Kafka Connect to use a plugin without a file name and trigger a rolling update", expected = "Kafka Connect plugin is updated without the file name successfully"), + @Step(value = "Verify plugin file name after update using the plugin's hash", expected = "Plugin file name is different from the previous name and matches the hash") + }, + labels = { + @Label(value = "connect") + } + ) void testBuildOtherPluginTypeWithAndWithoutFileName() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -457,6 +541,20 @@ void testBuildOtherPluginTypeWithAndWithoutFileName() { @Tag(ACCEPTANCE) @KindNotSupported("using kind we encounter (error building image: deleting file system after stage 0: unlinkat //product_uuid: device or resource busy)") @ParallelTest + @TestDoc( + description = @Desc("Test building a plugin using Maven coordinates artifacts."), + steps = { + @Step(value = "Create a test storage object", expected = "Test storage object is created"), + @Step(value = "Generate image name for the test case", expected = "Image name is generated successfully"), + @Step(value = "Create Kafka topic and Kafka Connect resources with the configuration for plugin using mvn coordinates", expected = "Resources are created and available"), + @Step(value = "Configure Kafka Connector and deploy it", expected = "Connector is deployed with correct configuration"), + @Step(value = "Create Kafka consumer and start consuming messages", expected = "Consumer starts consuming messages successfully"), + @Step(value = "Verify that consumer receives messages", expected = "Consumer receives the expected messages") + }, + labels = { + @Label(value = "connect") + } + ) void testBuildPluginUsingMavenCoordinatesArtifacts() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index 91652104018..95c88ff0dbf 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -15,6 +15,11 @@ import io.fabric8.kubernetes.api.model.SecretBuilder; import io.fabric8.kubernetes.api.model.SecretKeySelectorBuilder; import io.fabric8.kubernetes.api.model.SecretVolumeSourceBuilder; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.common.CertSecretSourceBuilder; import io.strimzi.api.kafka.model.common.ConnectorState; import io.strimzi.api.kafka.model.common.PasswordSecretSourceBuilder; @@ -106,11 +111,35 @@ @Tag(CONNECT) 
@Tag(CONNECT_COMPONENTS)
 @SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling"})
+@SuiteDoc(
+    description = @Desc("Verifies the deployment, manual rolling update, and undeployment of Kafka Connect components."),
+    beforeTestSteps = {
+        @Step(value = "Deploy scraper Pod for accessing all other Pods", expected = "Scraper Pod is deployed")
+    },
+    labels = {
+        @Label(value = "connect")
+    }
+)
 class ConnectST extends AbstractST {

     private static final Logger LOGGER = LogManager.getLogger(ConnectST.class);

     @ParallelNamespaceTest
+    @TestDoc(
+        description = @Desc("Verifies the deployment, manual rolling update, and undeployment of Kafka Connect components."),
+        steps = {
+            @Step(value = "Initialize Test Storage", expected = "Test storage instance is created with required context"),
+            @Step(value = "Define expected configurations", expected = "Configurations are loaded from properties file"),
+            @Step(value = "Create and wait for resources", expected = "Kafka resources, including NodePools and KafkaConnect instances, are created and become ready"),
+            @Step(value = "Annotate for manual rolling update", expected = "KafkaConnect components are annotated for a manual rolling update"),
+            @Step(value = "Perform and wait for rolling update", expected = "KafkaConnect components roll and new pods are deployed"),
+            @Step(value = "Verify Kafka Connect pods", expected = "Pod configurations and annotations are verified"),
+            @Step(value = "Verify Kafka Connect resources", expected = "Various Kafka Connect resource labels and configurations are verified to ensure correct deployment")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testDeployRollUndeploy() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());

@@ -155,27 +184,21 @@ void testDeployRollUndeploy() {
         VerificationUtils.verifyServiceAccountsLabels(testStorage.getNamespaceName(), testStorage.getClusterName());
     }

-    /**
-     * @description This test case verifies pausing, stopping and running of connector via 'spec.pause' or 'spec.state' specification.
-     *
-     * @steps
-     *  1. - Deploy prerequisites for running FileSink KafkaConnector, that is KafkaTopic, Kafka cluster, KafkaConnect, and FileSink KafkaConnector.
-     *     - All resources are deployed and ready.
-     *  2. - Pause and run connector by modifying 'spec.pause' property of Connector, while also producing messages when connector pauses.
-     *     - Connector is paused and resumed as expected, after connector is resumed, produced messages are present in destination file, indicating connector resumed correctly.
-     *  3. - Stop and run connector by modifying 'spec.state' property of Connector (with priority over now set 'spec.pause=false'), while also producing messages when connector stops.
-     *     - Connector stops and resumes as expected, after resuming, produced messages are present in destination file, indicating connector resumed correctly.
-     *  4. - Pause and run connector by modifying 'spec.state' property of Connector (with priority over now set 'spec.pause=false'), while also producing messages when connector pauses.
-     *     - Connector pauses and resumes as expected, after resuming, produced messages are present in destination file, indicating connector resumed correctly.
- * - * @usecase - * - kafka-connect - * - kafka-connector - * - connector-state - */ @ParallelNamespaceTest @Tag(SANITY) @Tag(SMOKE) + @TestDoc( + description = @Desc("This test case verifies pausing, stopping and running of connector via 'spec.pause' or 'spec.state' specification."), + steps = { + @Step(value = "Deploy prerequisites for running FileSink KafkaConnector, that is KafkaTopic, Kafka cluster, KafkaConnect, and FileSink KafkaConnector.", expected = "All resources are deployed and ready."), + @Step(value = "Pause and run connector by modifying 'spec.pause' property of Connector, while also producing messages when connector pauses.", expected = "Connector is paused and resumed as expected, after connector is resumed, produced messages are present in destination file, indicating connector resumed correctly."), + @Step(value = "Stop and run connector by modifying 'spec.state' property of Connector (with priority over now set 'spec.pause=false'), while also producing messages when connector stops.", expected = "Connector stops and resumes as expected, after resuming, produced messages are present in destination file, indicating connector resumed correctly."), + @Step(value = "Pause and run connector by modifying 'spec.state' property of Connector (with priority over now set 'spec.pause=false'), while also producing messages when connector pauses.", expected = "Connector pauses and resumes as expected, after resuming, produced messages are present in destination file, indicating connector resumed correctly.") + }, + labels = { + @Label(value = "connect") + } + ) void testKafkaConnectAndConnectorStateWithFileSinkPlugin() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -232,6 +255,24 @@ void testKafkaConnectAndConnectorStateWithFileSinkPlugin() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test verifying Kafka Connect functionalities with Plain and SCRAM-SHA authentication."), + steps = { + @Step(value = "Create object instance of TestStorage", expected = "Instance of TestStorage is created"), + @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Deploy Kafka with SCRAM-SHA-512 listener", expected = "Kafka is deployed with the specified listener authentication"), + @Step(value = "Create KafkaUser with SCRAM-SHA authentication", expected = "KafkaUser is created using SCRAM-SHA authentication with the given credentials"), + @Step(value = "Create KafkaTopic", expected = "KafkaTopic is created"), + @Step(value = "Deploy KafkaConnect with SCRAM-SHA-512 authentication", expected = "KafkaConnect instance is deployed and connected to Kafka"), + @Step(value = "Deploy required resources for NetworkPolicy, KafkaConnect, and ScraperPod", expected = "Resources are successfully deployed with NetworkPolicy applied"), + @Step(value = "Create FileStreamSink connector", expected = "FileStreamSink connector is created successfully"), + @Step(value = "Create Kafka client with SCRAM-SHA-PLAIN authentication and send messages", expected = "Messages are produced and consumed successfully using Kafka client with SCRAM-SHA-PLAIN authentication"), + @Step(value = "Verify messages in KafkaConnect file sink", expected = "FileSink contains the expected number of messages") + }, + labels = { + @Label(value = "connect") + } + ) void testKafkaConnectWithPlainAndScramShaAuthentication() { final TestStorage testStorage = new 
TestStorage(ResourceManager.getTestContext()); @@ -306,6 +347,23 @@ void testKafkaConnectWithPlainAndScramShaAuthentication() { @ParallelNamespaceTest @Tag(CONNECTOR_OPERATOR) + @TestDoc( + description = @Desc("Test the functionality of Kafka Connect with a File Sink Plugin in a parallel namespace setup."), + steps = { + @Step(value = "Create and configure test storage", expected = "Test storage is set up with necessary configurations."), + @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create and wait for the broker and controller pools", expected = "Broker and controller pools are created and running."), + @Step(value = "Deploy and configure Kafka Connect with File Sink Plugin", expected = "Kafka Connect with File Sink Plugin is deployed and configured."), + @Step(value = "Deploy Network Policies for Kafka Connect", expected = "Network Policies are successfully deployed for Kafka Connect."), + @Step(value = "Create and wait for Kafka Connector", expected = "Kafka Connector is created and running."), + @Step(value = "Deploy and configure scraper pod", expected = "Scraper pod is deployed and configured."), + @Step(value = "Deploy and configure Kafka clients", expected = "Kafka clients are deployed and configured."), + @Step(value = "Execute assertions to verify the Kafka Connector configuration and status", expected = "Assertions confirm the Kafka Connector is successfully deployed, has the correct configuration, and is running.") + }, + labels = { + @Label(value = "connect") + } + ) void testKafkaConnectAndConnectorFileSinkPlugin() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -354,8 +412,21 @@ void testKafkaConnectAndConnectorFileSinkPlugin() { assertThat(output, containsString("\"topic\":\"" + testStorage.getTopicName() + "\"")); } - @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test ensuring the JVM options and resource requests/limits are correctly applied to Kafka Connect components."), + steps = { + @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created"), + @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create broker and controller node pools", expected = "Node pools are created and ready"), + @Step(value = "Create Kafka cluster", expected = "Kafka cluster is created and operational"), + @Step(value = "Setup JVM options and resource requirements for Kafka Connect", expected = "Kafka Connect is configured with specified JVM options and resources"), + @Step(value = "Verify JVM options and resource requirements", expected = "JVM options and resource requests/limits are correctly applied to the Kafka Connect pod") + }, + labels = { + @Label(value = "connect") + } + ) void testJvmAndResources() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -395,6 +466,23 @@ void testJvmAndResources() { @ParallelNamespaceTest @Tag(COMPONENT_SCALING) + @TestDoc( + description = @Desc("Test verifying the scaling up and down functionality of Kafka Connect in a Kubernetes environment."), + steps = { + @Step(value = "Create TestStorage object instance", expected = "Instance of TestStorage is created"), + @Step(value = "Create NodePools using resourceManager based on the configuration", expected 
= "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create resources for KafkaNodePools and KafkaCluster", expected = "Resources are created and ready"), + @Step(value = "Deploy Kafka Connect with file plugin", expected = "Kafka Connect is deployed with 1 initial replica"), + @Step(value = "Verify the initial replica count", expected = "Initial replica count is verified to be 1"), + @Step(value = "Scale Kafka Connect up to a higher number of replicas", expected = "Kafka Connect is scaled up successfully"), + @Step(value = "Verify the new replica count after scaling up", expected = "New replica count is verified to be the scaled up count"), + @Step(value = "Scale Kafka Connect down to the initial number of replicas", expected = "Kafka Connect is scaled down successfully"), + @Step(value = "Verify the replica count after scaling down", expected = "Replica count is verified to be the initial count") + }, + labels = { + @Label(value = "connect") + } + ) void testKafkaConnectScaleUpScaleDown() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -430,6 +518,25 @@ void testKafkaConnectScaleUpScaleDown() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test verifies that Kafka Connect works with TLS and TLS client authentication."), + steps = { + @Step(value = "Create test storage instance", expected = "Test storage instance is created"), + @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create resources for Kafka broker and Kafka Connect components", expected = "Resources are created and ready"), + @Step(value = "Configure Kafka broker with TLS listener and client authentication", expected = "Kafka broker is configured correctly"), + @Step(value = "Deploy Kafka user with TLS authentication", expected = "Kafka user is deployed with TLS authentication"), + @Step(value = "Deploy Kafka topic", expected = "Kafka topic is deployed"), + @Step(value = "Configure and deploy Kafka Connect with TLS and TLS client authentication", expected = "Kafka Connect is configured and deployed correctly"), + @Step(value = "Deploy Network Policies for Kafka Connect", expected = "Network Policies are deployed"), + @Step(value = "Create FileStreamSink KafkaConnector via scraper pod", expected = "KafkaConnector is created correctly"), + @Step(value = "Deploy TLS clients and produce/consume messages", expected = "Messages are produced and consumed successfully"), + @Step(value = "Verify messages in Kafka Connect FileSink", expected = "Messages are verified in Kafka Connect FileSink") + }, + labels = { + @Label(value = "connect") + } + ) void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -503,6 +610,26 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test validating Kafka Connect with TLS and SCRAM-SHA authentication along with associated resources setup and verification."), + steps = { + @Step(value = "Initialize test storage", expected = "Instances created successfully"), + @Step(value = "Create Kafka node pools (broker and controller)", expected = "Node pools created and ready"), + @Step(value = "Deploy Kafka cluster with TLS and SCRAM-SHA-512 authentication", expected = "Kafka cluster 
@@ -503,6 +610,26 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() {
     }
 
     @ParallelNamespaceTest
+    @TestDoc(
+        description = @Desc("Test validating Kafka Connect with TLS and SCRAM-SHA authentication, including setup and verification of the associated resources."),
+        steps = {
+            @Step(value = "Initialize test storage", expected = "Instances created successfully"),
+            @Step(value = "Create Kafka node pools (broker and controller)", expected = "Node pools created and ready"),
+            @Step(value = "Deploy Kafka cluster with TLS and SCRAM-SHA-512 authentication", expected = "Kafka cluster deployed with listeners configured"),
+            @Step(value = "Create Kafka user with SCRAM-SHA-512", expected = "User created successfully"),
+            @Step(value = "Deploy Kafka topic", expected = "Topic created successfully"),
+            @Step(value = "Deploy Kafka Connect with TLS and SCRAM-SHA-512 authentication", expected = "Kafka Connect deployed with plugins and configuration"),
+            @Step(value = "Deploy scraper pod for testing Kafka Connect", expected = "Scraper pod deployed successfully"),
+            @Step(value = "Deploy NetworkPolicies for Kafka Connect", expected = "NetworkPolicies applied successfully"),
+            @Step(value = "Create and configure FileStreamSink KafkaConnector", expected = "FileStreamSink KafkaConnector created and configured"),
+            @Step(value = "Create Kafka clients for SCRAM-SHA-512 over TLS", expected = "Kafka clients (producer and consumer) created successfully"),
+            @Step(value = "Wait for client operations to succeed", expected = "Message production and consumption verified"),
+            @Step(value = "Verify messages in Kafka Connect file sink", expected = "Messages found in the specified file path")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
@@ -575,6 +702,25 @@ void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication() {
 
     @ParallelNamespaceTest
     @MicroShiftNotSupported("The test is using Connect Build feature that is not available on MicroShift")
+    @TestDoc(
+        description = @Desc("Test the automatic restart functionality of Kafka Connect tasks when they fail."),
+        steps = {
+            @Step(value = "Create test storage instance", expected = "Test storage instance is created"),
+            @Step(value = "Create node pool resources", expected = "Node pool resources are created and waited for readiness"),
+            @Step(value = "Create Kafka cluster", expected = "Kafka cluster is created and waited for readiness"),
+            @Step(value = "Deploy EchoSink Kafka Connector with auto-restart enabled", expected = "Kafka Connector is created with auto-restart enabled"),
+            @Step(value = "Send first batch of messages", expected = "First batch of messages is sent to the topic"),
+            @Step(value = "Ensure connection success for the first batch", expected = "First batch of messages is produced successfully"),
+            @Step(value = "Send second batch of messages", expected = "Second batch of messages is sent to the topic"),
+            @Step(value = "Ensure connection success for the second batch", expected = "Second batch of messages is produced successfully"),
+            @Step(value = "Verify task failure and auto-restart", expected = "Connector task fails and is automatically restarted"),
+            @Step(value = "Wait for task to reach running state", expected = "Connector task returns to running state after recovery"),
+            @Step(value = "Verify auto-restart count reset", expected = "Auto-restart count is reset to zero after task stability")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testConnectorTaskAutoRestart() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
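The auto-restart behavior tested above is driven by spec.autoRestart on the KafkaConnector. A sketch of a connector with it enabled; the connector name, class, and topic are illustrative, and package paths of the Strimzi api module vary by version:

```java
import io.strimzi.api.kafka.model.connector.KafkaConnector;
import io.strimzi.api.kafka.model.connector.KafkaConnectorBuilder;

public class AutoRestartSketch {
    // Builds a KafkaConnector whose failed tasks the operator restarts with
    // back-off, resetting the restart counter once the task is stable again.
    static KafkaConnector echoSinkWithAutoRestart(String namespace, String connectClusterName) {
        return new KafkaConnectorBuilder()
            .withNewMetadata()
                .withName("echo-sink")
                .withNamespace(namespace)
                // ties the connector to the Connect cluster that should run it
                .addToLabels("strimzi.io/cluster", connectClusterName)
            .endMetadata()
            .withNewSpec()
                .withClassName("EchoSink")   // plugin class name is illustrative
                .withTasksMax(1)
                .withNewAutoRestart()
                    .withEnabled(true)
                .endAutoRestart()
                .addToConfig("topics", "my-topic")
            .endSpec()
            .build();
    }
}
```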
@@ -667,6 +813,19 @@ void testConnectorTaskAutoRestart() {
     }
 
     @ParallelNamespaceTest
+    @TestDoc(
+        description = @Desc("Test that verifies custom and updated environment variables and readiness/liveness probes for Kafka Connect."),
+        steps = {
+            @Step(value = "Create and configure Kafka Connect with initial values", expected = "Kafka Connect is created and configured with initial environment variables and readiness/liveness probes"),
+            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Verify initial configuration and environment variables", expected = "Initial configuration and environment variables are as expected"),
+            @Step(value = "Update Kafka Connect configuration and environment variables", expected = "Kafka Connect configuration and environment variables are updated"),
+            @Step(value = "Verify updated configuration and environment variables", expected = "Updated configuration and environment variables are as expected")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testCustomAndUpdatedValues() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
         final String usedVariable = "KAFKA_CONNECT_CONFIGURATION";
 
@@ -768,6 +927,22 @@ void testCustomAndUpdatedValues() {
 
     @ParallelNamespaceTest
     @Tag(CONNECTOR_OPERATOR)
     @Tag(ACCEPTANCE)
+    @TestDoc(
+        description = @Desc("Test validating multi-node Kafka Connect cluster creation, connector deployment, and message processing."),
+        steps = {
+            @Step(value = "Initialize test storage and determine connect cluster name", expected = "Test storage and cluster name properly initialized"),
+            @Step(value = "Create broker and controller node pools", expected = "Broker and controller node pools created successfully"),
+            @Step(value = "Deploy Kafka cluster in ephemeral mode", expected = "Kafka cluster deployed successfully"),
+            @Step(value = "Create Kafka Connect cluster with default image", expected = "Kafka Connect cluster created with appropriate configuration"),
+            @Step(value = "Create and configure Kafka Connector", expected = "Kafka Connector deployed and configured with correct settings"),
+            @Step(value = "Verify the status of the Kafka Connector", expected = "Kafka Connector status retrieved and worker node identified"),
+            @Step(value = "Deploy Kafka clients for producer and consumer", expected = "Kafka producer and consumer clients deployed"),
+            @Step(value = "Verify that Kafka Connect writes messages to the specified file sink", expected = "Messages successfully written to the file sink by Kafka Connect")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testMultiNodeKafkaConnectWithConnectorCreation() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
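The FileStreamSink connector that the file-sink tests deploy is configured entirely through spec.config. A sketch with an illustrative file path and topic; FileStreamSinkConnector itself ships with Kafka's connect-file plugin, while the Strimzi builder package paths vary by version:

```java
import io.strimzi.api.kafka.model.connector.KafkaConnector;
import io.strimzi.api.kafka.model.connector.KafkaConnectorBuilder;

public class FileSinkConnectorSketch {
    // Builds a connector that appends every record from the topic to a file
    // inside the Connect pod; names and paths are illustrative.
    static KafkaConnector fileSink(String namespace, String connectClusterName, String topicName) {
        return new KafkaConnectorBuilder()
            .withNewMetadata()
                .withName("sink-connector")
                .withNamespace(namespace)
                .addToLabels("strimzi.io/cluster", connectClusterName)
            .endMetadata()
            .withNewSpec()
                .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
                .withTasksMax(1)
                .addToConfig("file", "/tmp/test-file-sink.txt")
                .addToConfig("topics", topicName)
            .endSpec()
            .build();
    }
}
```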
consumed successfully by Kafka Connect
@@ -822,6 +997,20 @@ void testMultiNodeKafkaConnectWithConnectorCreation() {
 
     @Tag(CONNECTOR_OPERATOR)
     @ParallelNamespaceTest
+    @TestDoc(
+        description = @Desc("Test verifying Kafka Connect TLS authentication with a username containing unusual characters."),
+        steps = {
+            @Step(value = "Set up a username containing dots and 64 characters", expected = "Username is set up"),
+            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create Kafka broker, controller, topic, and Kafka user with the specified username", expected = "Resources are created with the expected configurations"),
+            @Step(value = "Setup Kafka Connect with the created Kafka instance and TLS authentication", expected = "Kafka Connect is set up with the expected configurations"),
+            @Step(value = "Check if the user can produce messages to Kafka", expected = "Messages are produced successfully"),
+            @Step(value = "Verify that Kafka Connect can consume messages", expected = "Messages are consumed successfully by Kafka Connect")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testConnectTlsAuthWithWeirdUserName() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
@@ -898,6 +1087,23 @@ void testConnectTlsAuthWithWeirdUserName() {
 
     @Tag(CONNECTOR_OPERATOR)
     @ParallelNamespaceTest
+    @TestDoc(
+        description = @Desc("Test verifying that Kafka Connect can authenticate with SCRAM-SHA-512 using a username with special characters and length exceeding typical constraints."),
+        steps = {
+            @Step(value = "Create resource with Node Pools", expected = "Node Pools created successfully"),
+            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Deploy Kafka cluster with SCRAM-SHA-512 authentication", expected = "Kafka cluster deployed with specified authentications"),
+            @Step(value = "Create Kafka Topic", expected = "Topic created successfully"),
+            @Step(value = "Create Kafka SCRAM-SHA-512 user with a weird username", expected = "User created successfully with SCRAM-SHA-512 credentials"),
+            @Step(value = "Deploy Kafka Connect with SCRAM-SHA-512 authentication", expected = "Kafka Connect instance deployed and configured with user credentials"),
+            @Step(value = "Deploy Kafka Connector", expected = "Kafka Connector deployed and configured successfully"),
+            @Step(value = "Send messages using the configured client", expected = "Messages sent successfully"),
+            @Step(value = "Verify that connector receives messages", expected = "Messages consumed by the connector and written to the specified sink")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testConnectScramShaAuthWithWeirdUserName() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
@@ -974,6 +1180,22 @@ void testConnectScramShaAuthWithWeirdUserName() {
 
     @ParallelNamespaceTest
     @Tag(COMPONENT_SCALING)
+    @TestDoc(
+        description = @Desc("Test to validate scaling KafkaConnect without a connector to zero replicas."),
+        steps = {
+            @Step(value = "Initialize TestStorage and create namespace", expected = "Namespace and storage initialized"),
+            @Step(value = "Create broker and controller node pools", expected = "Node pools created with 3 replicas."),
+            @Step(value = "Create ephemeral Kafka cluster", expected = "Kafka cluster created with 3 replicas."),
+            @Step(value = "Create KafkaConnect resource with 2 replicas", expected = "KafkaConnect resource created with 2 replicas."),
+            @Step(value = "Verify that KafkaConnect has 2 pods", expected = "2 KafkaConnect pods are running."),
+            @Step(value = "Scale down KafkaConnect to zero replicas", expected = "KafkaConnect scaled to zero replicas."),
+            @Step(value = "Wait for KafkaConnect to be ready", expected = "KafkaConnect is ready with 0 replicas."),
+            @Step(value = "Verify that KafkaConnect has 0 pods", expected = "No KafkaConnect pods are running and status is ready.")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testScaleConnectWithoutConnectorToZero() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
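Scaling Kafka Connect in these tests is just an edit of spec.replicas, which the operator then reconciles. A sketch using the typed client helpers from the Strimzi api module (Crds.kafkaConnectOperation); names are illustrative and package paths vary by version:

```java
import io.fabric8.kubernetes.client.KubernetesClient;
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.model.connect.KafkaConnectBuilder;

public class ScaleConnectSketch {
    // Scales a KafkaConnect down to zero replicas; the operator removes all
    // Connect pods while the resource itself stays Ready.
    static void scaleToZero(KubernetesClient client, String namespace, String name) {
        Crds.kafkaConnectOperation(client)
            .inNamespace(namespace)
            .withName(name)
            .edit(connect -> new KafkaConnectBuilder(connect)
                .editSpec()
                    .withReplicas(0)
                .endSpec()
                .build());
    }
}
```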
@@ -1006,6 +1228,24 @@ void testScaleConnectWithoutConnectorToZero() {
     }
 
     @ParallelNamespaceTest
     @Tag(CONNECTOR_OPERATOR)
     @Tag(COMPONENT_SCALING)
+    @TestDoc(
+        description = @Desc("Test scaling Kafka Connect with a connector to zero replicas."),
+        steps = {
+            @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created with context"),
+            @Step(value = "Create broker and controller node pools", expected = "Broker and Controller node pools are created"),
+            @Step(value = "Create ephemeral Kafka cluster", expected = "Kafka cluster with 3 replicas is created"),
+            @Step(value = "Create Kafka Connect with file plugin", expected = "Kafka Connect is created with 2 replicas and file plugin"),
+            @Step(value = "Create Kafka Connector", expected = "Kafka Connector is created with necessary configurations"),
+            @Step(value = "Check Kafka Connect pods", expected = "There are 2 Kafka Connect pods"),
+            @Step(value = "Scale down Kafka Connect to zero", expected = "Kafka Connect is scaled down to 0 replicas"),
+            @Step(value = "Wait for Kafka Connect to be ready", expected = "Kafka Connect readiness is verified"),
+            @Step(value = "Wait for Kafka Connector to not be ready", expected = "Kafka Connector is reported as not ready"),
+            @Step(value = "Verify conditions", expected = "Pod size is 0, Kafka Connect is ready, Kafka Connector is not ready due to zero replicas")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testScaleConnectWithConnectorToZero() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
@@ -1056,6 +1296,21 @@ void testScaleConnectWithConnectorToZero() {
 
     @ParallelNamespaceTest
     @Tag(CONNECTOR_OPERATOR)
     @Tag(COMPONENT_SCALING)
+    @TestDoc(
+        description = @Desc("This test verifies the scaling functionality of Kafka Connect and Kafka Connector subresources."),
+        steps = {
+            @Step(value = "Initialize the test storage and create broker and controller pools", expected = "Broker and controller pools are created successfully"),
+            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Deploy Kafka, Kafka Connect and Kafka Connector resources", expected = "Kafka, Kafka Connect and Kafka Connector resources are deployed successfully"),
+            @Step(value = "Scale Kafka Connect subresource", expected = "Kafka Connect subresource is scaled successfully"),
+            @Step(value = "Verify Kafka Connect subresource scaling", expected = "Kafka Connect replicas and observed generation are as expected"),
+            @Step(value = "Scale Kafka Connector subresource", expected = "Kafka Connector subresource task max is set correctly"),
+            @Step(value = "Verify Kafka Connector subresource scaling", expected = "Kafka Connector task max in spec, status and Connect Pods API are as expected")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testScaleConnectAndConnectorSubresource() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
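The connector side of the subresource-scaling test revolves around tasksMax, which must agree across spec, status, and the Connect REST API after reconciliation. A sketch of the edit, again using the Crds helper from the Strimzi api module with illustrative names:

```java
import io.fabric8.kubernetes.client.KubernetesClient;
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.model.connector.KafkaConnectorBuilder;

public class ConnectorScaleSketch {
    // Sets tasksMax on a KafkaConnector; once reconciled, the same value should
    // appear in status.tasksMax and in the connector config served by the
    // Connect REST API.
    static void setTasksMax(KubernetesClient client, String namespace, String name, int tasksMax) {
        Crds.kafkaConnectorOperation(client)
            .inNamespace(namespace)
            .withName(name)
            .edit(connector -> new KafkaConnectorBuilder(connector)
                .editSpec()
                    .withTasksMax(tasksMax)
                .endSpec()
                .build());
    }
}
```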
@@ -1127,6 +1382,20 @@ void testScaleConnectAndConnectorSubresource() {
 
     @ParallelNamespaceTest
     @SuppressWarnings({"checkstyle:MethodLength"})
+    @TestDoc(
+        description = @Desc("This test verifies that Secrets and ConfigMaps can be mounted as volumes and environment variables in Kafka Connect."),
+        steps = {
+            @Step(value = "Create Secrets and ConfigMaps", expected = "Secrets and ConfigMaps are created successfully."),
+            @Step(value = "Create Kafka environment", expected = "Kafka broker, Kafka Connect, and other resources are deployed successfully."),
+            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Bind Secrets and ConfigMaps to Kafka Connect", expected = "Secrets and ConfigMaps are bound to Kafka Connect as volumes and environment variables."),
+            @Step(value = "Verify environment variables", expected = "Kafka Connect environment variables contain expected values from Secrets and ConfigMaps."),
+            @Step(value = "Verify mounted volumes", expected = "Kafka Connect mounted volumes contain expected values from Secrets and ConfigMaps.")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testMountingSecretAndConfigMapAsVolumesAndEnvVars() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
@@ -1294,6 +1563,20 @@ void testMountingSecretAndConfigMapAsVolumesAndEnvVars() {
 
     @ParallelNamespaceTest
     // changing the password in Secret should cause the RU of connect pod
+    @TestDoc(
+        description = @Desc("Verifies Kafka Connect functionality when SCRAM-SHA authentication password is changed and the component is rolled."),
+        steps = {
+            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create Kafka cluster with SCRAM-SHA authentication", expected = "Kafka cluster is created with SCRAM-SHA authentication enabled"),
+            @Step(value = "Create a Kafka user with SCRAM-SHA authentication", expected = "Kafka user with SCRAM-SHA authentication is created"),
+            @Step(value = "Deploy Kafka Connect with the created user credentials", expected = "Kafka Connect is deployed successfully"),
+            @Step(value = "Update the SCRAM-SHA user password and reconfigure Kafka Connect", expected = "Kafka Connect is reconfigured with the new password"),
+            @Step(value = "Verify Kafka Connect continues to function after rolling update", expected = "Kafka Connect remains functional and REST API is available")
+        },
+        labels = {
+            @Label(value = "connect")
+        }
+    )
     void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged() {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
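The rolling update in the password-change test is triggered by modifying the Secret that backs the SCRAM-SHA credentials. A sketch of that update with fabric8, assuming the KafkaUser's password is sourced from this Secret; the secret name, key, and password value are illustrative:

```java
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;

public class PasswordRollSketch {
    // Replaces the password in the credentials Secret; since Connect mounts
    // these credentials, the operator rolls the Connect pods to pick up the change.
    static void updatePassword(KubernetesClient client, String namespace, String secretName) {
        Secret updated = new SecretBuilder()
            .withNewMetadata()
                .withName(secretName)
                .withNamespace(namespace)
            .endMetadata()
            // stringData is base64-encoded by the API server on write
            .addToStringData("password", "completely-new-password")
            .build();
        client.secrets().inNamespace(namespace).resource(updated).update();
    }
}
```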