Skip to content

Commit

Permalink
[ST] Remove ZooKeeper from upgrade/downgrade tests (#11026)
Browse files Browse the repository at this point in the history
Signed-off-by: Lukas Kral <[email protected]>
  • Loading branch information
im-konge authored Jan 15, 2025
1 parent 3ad615f commit 751a6ba
Show file tree
Hide file tree
Showing 16 changed files with 461 additions and 1,335 deletions.
7 changes: 0 additions & 7 deletions .azure/systemtests/systemtest-settings.xml
Original file line number Diff line number Diff line change
Expand Up @@ -167,12 +167,5 @@
</properties>
</profile>

<profile>
<id>azp_migration</id>
<properties>
<skipTests>false</skipTests>
<groups>migration</groups>
</properties>
</profile>
</profiles>
</settings>
10 changes: 0 additions & 10 deletions systemtest/src/main/java/io/strimzi/systemtest/TestTags.java
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,6 @@ public interface TestTags {
*/
String REGRESSION = "regression";

/**
* Tag for upgrade tests.
*/
String UPGRADE = "upgrade";

/**
* Tag for KRaft to KRaft tests.
*/
Expand Down Expand Up @@ -156,11 +151,6 @@ public interface TestTags {
*/
String ROUTE = "route";

/**
* Tag for tests that focus on migration from ZK to KRaft
*/
String MIGRATION = "migration";

/**
* Tag for tests that focus on performance
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,6 @@ public static boolean requiresSharedNamespace(final ExtensionContext extensionCo
TestTags.DYNAMIC_CONFIGURATION, // Dynamic configuration also because in DynamicConfSharedST we use @TestFactory
TestTags.TRACING, // Tracing, because we deploy Jaeger operator inside additional namespace
TestTags.KAFKA_SMOKE, // KafkaVersionsST, because here we use @ParameterizedTest
TestTags.MIGRATION,
TestTags.UPGRADE,
TestTags.KRAFT_UPGRADE
);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,14 +101,6 @@ public String getKafkaFilePathAfter() {
return getFilePaths().get("kafkaAfter");
}

public String getKafkaKRaftFilePathBefore() {
return getFilePaths().get("kafkaKRaftBefore");
}

public String getKafkaKRaftFilePathAfter() {
return getFilePaths().get("kafkaKRaftAfter");
}

@Override
public String toString() {
return "\n" +
Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.upgrade.kraft;
package io.strimzi.systemtest.upgrade;

import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.model.kafka.KafkaBuilder;
Expand Down Expand Up @@ -131,10 +131,10 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion,

boolean sameMinorVersion = initialVersion.metadataVersion().equals(newVersion.metadataVersion());

if (KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get() == null) {
if (KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(CLUSTER_NAME).get() == null) {
LOGGER.info("Deploying initial Kafka version {} with metadataVersion={}", initialVersion.version(), initMetadataVersion);

KafkaBuilder kafka = KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), clusterName, controllerReplicas, brokerReplicas)
KafkaBuilder kafka = KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), CLUSTER_NAME, controllerReplicas, brokerReplicas)
.editMetadata()
.addToAnnotations(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")
.addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled")
Expand All @@ -157,22 +157,22 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion,
}

resourceManager.createResourceWithWait(
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), CONTROLLER_NODE_NAME, clusterName, controllerReplicas).build(),
KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), BROKER_NODE_NAME, clusterName, brokerReplicas).build(),
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), CONTROLLER_NODE_NAME, CLUSTER_NAME, controllerReplicas).build(),
KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), BROKER_NODE_NAME, CLUSTER_NAME, brokerReplicas).build(),
kafka.build()
);

// ##############################
// Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update
// ##############################
// Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update
resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getContinuousTopicName(), clusterName, 3, 3, 2).build());
resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getContinuousTopicName(), CLUSTER_NAME, 3, 3, 2).build());
// 40s is used within TF environment to make upgrade/downgrade more stable on slow env
String producerAdditionConfiguration = "delivery.timeout.ms=300000\nrequest.timeout.ms=20000";

KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage)
.withNamespaceName(testStorage.getNamespaceName())
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME))
.withMessageCount(continuousClientsMessageCount)
.withAdditionalConfig(producerAdditionConfiguration)
.build();
Expand All @@ -184,7 +184,7 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion,

LOGGER.info("Deployment of initial Kafka version (" + initialVersion.version() + ") complete");

String controllerVersionResult = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get().getStatus().getKafkaVersion();
String controllerVersionResult = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(CLUSTER_NAME).get().getStatus().getKafkaVersion();
LOGGER.info("Pre-change Kafka version: " + controllerVersionResult);

controllerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), controllerSelector);
Expand All @@ -193,7 +193,7 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion,
LOGGER.info("Updating Kafka CR version field to " + newVersion.version());

// Change the version in Kafka CR
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), clusterName, kafka -> {
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), CLUSTER_NAME, kafka -> {
kafka.getSpec().getKafka().setVersion(newVersion.version());
});

Expand All @@ -207,14 +207,14 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion,
brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerReplicas, brokerPods);
LOGGER.info("1st Brokers roll (image change) is complete");

String currentMetadataVersion = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get().getSpec().getKafka().getMetadataVersion();
String currentMetadataVersion = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(CLUSTER_NAME).get().getSpec().getKafka().getMetadataVersion();

LOGGER.info("Deployment of Kafka (" + newVersion.version() + ") complete");

PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);
PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), CLUSTER_NAME);

String controllerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName();
String brokerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, BROKER_NODE_NAME)).get(0).getMetadata().getName();
String controllerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(CLUSTER_NAME, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName();
String brokerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(CLUSTER_NAME, BROKER_NODE_NAME)).get(0).getMetadata().getName();

// Extract the Kafka version number from the jars in the lib directory
controllerVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), controllerPodName);
Expand All @@ -233,7 +233,7 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion,
if (isUpgrade && !sameMinorVersion) {
LOGGER.info("Updating Kafka config attribute 'metadataVersion' from '{}' to '{}' version", initialVersion.metadataVersion(), newVersion.metadataVersion());

KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), clusterName, kafka -> {
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), CLUSTER_NAME, kafka -> {
LOGGER.info("Kafka config before updating '{}'", kafka.getSpec().getKafka().toString());

kafka.getSpec().getKafka().setMetadataVersion(newVersion.metadataVersion());
Expand All @@ -242,24 +242,24 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion,
});

LOGGER.info("Metadata version changed, it doesn't require rolling update, so the Pods should be stable");
PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);
PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), CLUSTER_NAME);
assertFalse(RollingUpdateUtils.componentHasRolled(testStorage.getNamespaceName(), controllerSelector, controllerPods));
assertFalse(RollingUpdateUtils.componentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerPods));
}

if (!isUpgrade) {
LOGGER.info("Verifying that metadataVersion attribute updated correctly to version {}", initMetadataVersion);
assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName)
assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(CLUSTER_NAME)
.get().getStatus().getKafkaMetadataVersion().contains(initMetadataVersion), is(true));
} else {
if (currentMetadataVersion != null) {
LOGGER.info("Verifying that metadataVersion attribute updated correctly to version {}", newVersion.metadataVersion());
assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName)
assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(CLUSTER_NAME)
.get().getStatus().getKafkaMetadataVersion().contains(newVersion.metadataVersion()), is(true));
}
}

LOGGER.info("Waiting till Kafka Cluster {}/{} with specified version {} has the same version in status and specification", testStorage.getNamespaceName(), clusterName, newVersion.version());
KafkaUtils.waitUntilStatusKafkaVersionMatchesExpectedVersion(testStorage.getNamespaceName(), clusterName, newVersion.version());
LOGGER.info("Waiting till Kafka Cluster {}/{} with specified version {} has the same version in status and specification", testStorage.getNamespaceName(), CLUSTER_NAME, newVersion.version());
KafkaUtils.waitUntilStatusKafkaVersionMatchesExpectedVersion(testStorage.getNamespaceName(), CLUSTER_NAME, newVersion.version());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.upgrade.regular;
package io.strimzi.systemtest.upgrade;

import com.fasterxml.jackson.dataformat.yaml.YAMLMapper;
import io.strimzi.api.kafka.model.kafka.KafkaResources;
Expand All @@ -16,9 +16,6 @@
import io.strimzi.systemtest.resources.operator.configuration.OlmConfiguration;
import io.strimzi.systemtest.resources.operator.configuration.OlmConfigurationBuilder;
import io.strimzi.systemtest.storage.TestStorage;
import io.strimzi.systemtest.upgrade.AbstractUpgradeST;
import io.strimzi.systemtest.upgrade.OlmVersionModificationData;
import io.strimzi.systemtest.upgrade.VersionModificationDataLoader;
import io.strimzi.systemtest.upgrade.VersionModificationDataLoader.ModificationType;
import io.strimzi.systemtest.utils.ClientUtils;
import io.strimzi.systemtest.utils.FileUtils;
Expand All @@ -28,7 +25,6 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Tag;

import java.io.File;
Expand All @@ -46,14 +42,13 @@

/**
* This test class contains tests for Strimzi upgrade from version X to version X + 1.
* The difference between this class and {@link KRaftStrimziUpgradeST} is in cluster operator install type.
* Tests in this class use OLM for install cluster operator.
*/
@Tag(OLM_UPGRADE)
@Disabled // ZooKeeper is being removed
public class OlmUpgradeST extends AbstractUpgradeST {
public class KRaftOlmUpgradeST extends AbstractKRaftUpgradeST {

private static final Logger LOGGER = LogManager.getLogger(OlmUpgradeST.class);
private static final Logger LOGGER = LogManager.getLogger(KRaftOlmUpgradeST.class);
private final OlmVersionModificationData olmUpgradeData = new VersionModificationDataLoader(ModificationType.OLM_UPGRADE).getOlmUpgradeData();

@IsolatedTest
Expand All @@ -75,7 +70,7 @@ void testStrimziUpgrade() throws IOException {

// In this test we intend to setup Kafka once at the beginning and then upgrade it with CO
File dir = FileUtils.downloadAndUnzip(olmUpgradeData.getFromUrl());
File kafkaYaml = new File(dir, olmUpgradeData.getFromExamples() + "/examples/kafka/kafka-persistent.yaml");
File kafkaYaml = new File(dir, olmUpgradeData.getFromExamples() + olmUpgradeData.getKafkaFilePathBefore());

LOGGER.info("Deploying Kafka in Namespace: {} from file: {}", CO_NAMESPACE, kafkaYaml.getPath());
KubeClusterResource.cmdKubeClient(CO_NAMESPACE).create(kafkaYaml);
Expand Down Expand Up @@ -104,7 +99,7 @@ void testStrimziUpgrade() throws IOException {
.withProducerName(testStorage.getProducerName())
.withConsumerName(testStorage.getConsumerName())
.withNamespaceName(CO_NAMESPACE)
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME))
.withTopicName(topicUpgradeName)
.withMessageCount(testStorage.getMessageCount())
.withDelayMs(1000)
Expand Down Expand Up @@ -136,7 +131,7 @@ void testStrimziUpgrade() throws IOException {
// Wait for Rolling Update to finish
controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(CO_NAMESPACE, controllerSelector, 3, controllerPods);
brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(CO_NAMESPACE, brokerSelector, 3, brokerPods);
eoPods = DeploymentUtils.waitTillDepHasRolled(CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
eoPods = DeploymentUtils.waitTillDepHasRolled(CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(CLUSTER_NAME), 1, eoPods);
// ======== Cluster Operator upgrade ends ========

// ======== Kafka upgrade starts ========
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,12 @@
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.upgrade.kraft;
package io.strimzi.systemtest.upgrade;

import io.strimzi.systemtest.annotations.KindIPv6NotSupported;
import io.strimzi.systemtest.annotations.MicroShiftNotSupported;
import io.strimzi.systemtest.resources.ResourceManager;
import io.strimzi.systemtest.storage.TestStorage;
import io.strimzi.systemtest.upgrade.BundleVersionModificationData;
import io.strimzi.systemtest.upgrade.UpgradeKafkaVersion;
import io.strimzi.systemtest.utils.StUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
Expand Down
Loading

0 comments on commit 751a6ba

Please sign in to comment.