diff --git a/CHANGELOG.md b/CHANGELOG.md index 0497f782..ff3b0129 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ All notable changes to this project will be documented in this file. - Use `--file-log-max-files` (or `FILE_LOG_MAX_FILES`) to limit the number of log files kept. - Use `--file-log-rotation-period` (or `FILE_LOG_ROTATION_PERIOD`) to configure the frequency of rotation. - Use `--console-log-format` (or `CONSOLE_LOG_FORMAT`) to set the format to `plain` (default) or `json`. +- NiFi 2.x now supports storing cluster state in Kubernetes instead of ZooKeeper ([#775]). ### Changed @@ -37,6 +38,7 @@ All notable changes to this project will be documented in this file. [#771]: https://github.com/stackabletech/nifi-operator/pull/771 [#772]: https://github.com/stackabletech/nifi-operator/pull/772 [#774]: https://github.com/stackabletech/nifi-operator/pull/774 +[#775]: https://github.com/stackabletech/nifi-operator/pull/775 [#776]: https://github.com/stackabletech/nifi-operator/pull/776 [#782]: https://github.com/stackabletech/nifi-operator/pull/782 [#787]: https://github.com/stackabletech/nifi-operator/pull/787 diff --git a/deploy/helm/nifi-operator/crds/crds.yaml b/deploy/helm/nifi-operator/crds/crds.yaml index 7511f55a..4fb397ab 100644 --- a/deploy/helm/nifi-operator/crds/crds.yaml +++ b/deploy/helm/nifi-operator/crds/crds.yaml @@ -26,6 +26,10 @@ spec: description: A NiFi cluster stacklet. This resource is managed by the Stackable operator for Apache NiFi. Find more information on how to use it and the resources that the operator generates in the [operator documentation](https://docs.stackable.tech/home/nightly/nifi/). properties: clusterConfig: + anyOf: + - required: + - zookeeperConfigMapName + - {} description: Settings that affect all roles and role groups. The settings in the `clusterConfig` are cluster wide settings that do not need to be configurable at role or role group level. 
properties: authentication: @@ -158,12 +162,14 @@ spec: nullable: true type: string zookeeperConfigMapName: - description: NiFi requires a ZooKeeper cluster connection to run. Provide the name of the ZooKeeper [discovery ConfigMap](https://docs.stackable.tech/home/nightly/concepts/service_discovery) here. When using the [Stackable operator for Apache ZooKeeper](https://docs.stackable.tech/home/nightly/zookeeper/) to deploy a ZooKeeper cluster, this will simply be the name of your ZookeeperCluster resource. + description: |- + NiFi can either use ZooKeeper or Kubernetes for managing its cluster state. To use ZooKeeper, provide the name of the ZooKeeper [discovery ConfigMap](https://docs.stackable.tech/home/nightly/concepts/service_discovery) here. When using the [Stackable operator for Apache ZooKeeper](https://docs.stackable.tech/home/nightly/zookeeper/) to deploy a ZooKeeper cluster, this will simply be the name of your ZookeeperCluster resource. + + The Kubernetes provider will be used if this field is unset. Kubernetes is only supported for NiFi 2.x and newer, NiFi 1.x requires ZooKeeper. 
type: string required: - authentication - sensitiveProperties - - zookeeperConfigMapName type: object clusterOperation: default: diff --git a/deploy/helm/nifi-operator/templates/roles.yaml b/deploy/helm/nifi-operator/templates/roles.yaml index 53b90c67..17127ac4 100644 --- a/deploy/helm/nifi-operator/templates/roles.yaml +++ b/deploy/helm/nifi-operator/templates/roles.yaml @@ -133,9 +133,11 @@ rules: - apiGroups: - "" resources: - - configmaps - secrets - serviceaccounts + # This is redundant with the rule below specifically about configmaps + # (due to clustering), but we read them for other purposes too + - configmaps verbs: - get - apiGroups: @@ -144,6 +146,29 @@ rules: - events verbs: - create + # Required for Kubernetes-managed clustering, see https://nifi.apache.org/nifi-docs/administration-guide.html#kubernetes-clustering + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + # undocumented but required + - patch + # Required for Kubernetes cluster state provider, see https://nifi.apache.org/nifi-docs/administration-guide.html#kubernetes-configmap-cluster-state-provider + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update {{ if .Capabilities.APIVersions.Has "security.openshift.io/v1" }} - apiGroups: - security.openshift.io diff --git a/docs/modules/nifi/pages/index.adoc b/docs/modules/nifi/pages/index.adoc index 34e74dac..ead76339 100644 --- a/docs/modules/nifi/pages/index.adoc +++ b/docs/modules/nifi/pages/index.adoc @@ -35,7 +35,7 @@ Every role group is accessible through it's own Service, and there is a Service == Dependencies -Apache NiFi depends on Apache ZooKeeper which you can run in Kubernetes with the xref:zookeeper:index.adoc[]. +Apache NiFi 1.x depends on Apache ZooKeeper which you can run in Kubernetes with the xref:zookeeper:index.adoc[]. 
== [[demos]]Demos diff --git a/docs/modules/nifi/pages/usage_guide/clustering.adoc b/docs/modules/nifi/pages/usage_guide/clustering.adoc new file mode 100644 index 00000000..417c9f9b --- /dev/null +++ b/docs/modules/nifi/pages/usage_guide/clustering.adoc @@ -0,0 +1,41 @@ += Clustering +:description: Apache NiFi requires a backend for cluster management, and supports either Kubernetes or Apache ZooKeeper. +:page-aliases: usage_guide/zookeeper-connection.adoc + +Apache NiFi requires{empty}footnote:[Apache NiFi also supports a single-node mode with no cluster backend, but this is not supported by the Stackable Operator for Apache NiFi. The Stackable Operator does require a cluster backend.] an external backend for state management and leader election. + +Currently, the Stackable Operator for Apache NiFi supports the following backends: + +- xref:#backend-kubernetes[] +- xref:#backend-zookeeper[] + +CAUTION: The cluster backend of an existing cluster should never be changed. Otherwise data loss may occur, both due to losing NiFi processor state, and due to potential split-brain scenarios during the migration. + +[#backend-kubernetes] +== Kubernetes + +NOTE: The Kubernetes provider is only supported by Apache NiFi 2.0 or newer. When using NiFi 1.x, use the xref:#backend-zookeeper[] backend instead. + +The Kubernetes backend is used by default (unless the xref:#backend-zookeeper[] backend is configured), and stores all state in Kubernetes objects, in the same namespace as the `NifiCluster` object. + +It takes no configuration. + +[#backend-zookeeper] +== Apache ZooKeeper + +NiFi can also be configured to store its state in Apache ZooKeeper. + +NiFi in cluster mode requires an Apache ZooKeeper ensemble for state management and leader election purposes, the Stackable operator for Apache NiFi does not support single node deployments without ZooKeeper, hence this is a required setting. 
+ +This is enabled by setting the `spec.clusterConfig.zookeeperConfigMapName` to a xref:concepts:service-discovery.adoc[discovery ConfigMap]: + +[source,yaml] +---- +spec: + clusterConfig: + zookeeperConfigMapName: simple-nifi-znode +---- + +The ConfigMap needs to contain two keys: `ZOOKEEPER_HOSTS` containing the value being the ZooKeeper connection string, and `ZOOKEEPER_CHROOT` containing the ZooKeeper chroot. + +The xref:zookeeper:index.adoc[Stackable operator for Apache ZooKeeper] automatically creates this ConfigMap for every ZookeeperZnode object. diff --git a/docs/modules/nifi/pages/usage_guide/index.adoc b/docs/modules/nifi/pages/usage_guide/index.adoc index 79e803be..13dea386 100644 --- a/docs/modules/nifi/pages/usage_guide/index.adoc +++ b/docs/modules/nifi/pages/usage_guide/index.adoc @@ -43,7 +43,7 @@ spec: replicas: 3 ---- -<1> The xref:usage_guide/zookeeper-connection.adoc[ZooKeeper instance] to use. +<1> The xref:usage_guide/clustering.adoc#backend-zookeeper[ZooKeeper instance] to use. <2> How users should xref:usage_guide/security.adoc[authenticate] themselves. <3> xref:usage_guide/extra-volumes.adoc[Extra volumes] with files that can be referenced in custom workflows. <4> xref:usage_guide/resource-configuration.adoc[CPU and memory configuration] can be set per role group. diff --git a/docs/modules/nifi/pages/usage_guide/zookeeper-connection.adoc b/docs/modules/nifi/pages/usage_guide/zookeeper-connection.adoc deleted file mode 100644 index 489dc645..00000000 --- a/docs/modules/nifi/pages/usage_guide/zookeeper-connection.adoc +++ /dev/null @@ -1,14 +0,0 @@ -= Connecting NiFi to Apache ZooKeeper -:description: Connect NiFi to Apache ZooKeeper using the Stackable operator for cluster management, requiring a ZooKeeper ensemble for state management and leader election. 
- -NiFi in cluster mode requires an Apache ZooKeeper ensemble for state management and leader election purposes, the Stackable operator for Apache NiFi does not support single node deployments without ZooKeeper, hence this is a required setting. - -[source,yaml] ----- -spec: - clusterConfig: - zookeeperConfigMapName: simple-nifi-znode ----- - -Configuration happens via a xref:concepts:service-discovery.adoc[discovery ConfigMap], which needs to contain two keys called `ZOOKEEPER_HOSTS` with the value being the ZooKeeper connection string and `ZOOKEEPER_CHROOT` with the value being the ZooKeeper chroot. -When using the xref:zookeeper:index.adoc[Stackable operator for Apache ZooKeeper], the operator creates this ConfigMap for every ZNode automatically. diff --git a/docs/modules/nifi/partials/nav.adoc b/docs/modules/nifi/partials/nav.adoc index de4119e2..b753ce31 100644 --- a/docs/modules/nifi/partials/nav.adoc +++ b/docs/modules/nifi/partials/nav.adoc @@ -3,7 +3,7 @@ ** xref:nifi:getting_started/first_steps.adoc[] * xref:nifi:usage_guide/index.adoc[] ** xref:nifi:usage_guide/listenerclass.adoc[] -** xref:nifi:usage_guide/zookeeper-connection.adoc[] +** xref:nifi:usage_guide/clustering.adoc[] ** xref:nifi:usage_guide/extra-volumes.adoc[] ** xref:nifi:usage_guide/security.adoc[] ** xref:nifi:usage_guide/resource-configuration.adoc[] diff --git a/rust/operator-binary/src/config/mod.rs b/rust/operator-binary/src/config/mod.rs index b075c495..477cd0f7 100644 --- a/rust/operator-binary/src/config/mod.rs +++ b/rust/operator-binary/src/config/mod.rs @@ -5,7 +5,7 @@ use std::{ use jvm::build_merged_jvm_config; use product_config::{ProductConfigManager, types::PropertyNameKind}; -use snafu::{ResultExt, Snafu}; +use snafu::{ResultExt, Snafu, ensure}; use stackable_operator::{ commons::resources::Resources, memory::MemoryQuantity, @@ -20,7 +20,7 @@ use strum::{Display, EnumIter}; use crate::{ crd::{ HTTPS_PORT, NifiConfig, NifiConfigFragment, NifiRole, NifiStorageConfig, 
PROTOCOL_PORT, - v1alpha1, + v1alpha1::{self, NifiClusteringBackend}, }, operations::graceful_shutdown::graceful_shutdown_config_properties, security::{ @@ -96,6 +96,11 @@ pub enum Error { #[snafu(display("failed to generate OIDC config"))] GenerateOidcConfig { source: oidc::Error }, + + #[snafu(display( + "NiFi 1.x requires ZooKeeper (hint: upgrade to NiFi 2.x or set .spec.clusterConfig.zookeeperConfigMapName)" + ))] + Nifi1RequiresZookeeper, } /// Create the NiFi bootstrap.conf @@ -143,13 +148,15 @@ pub fn build_nifi_properties( overrides: BTreeMap, product_version: &str, ) -> Result { + // TODO: Remove once we dropped support for all NiFi 1.x versions + let is_nifi_1 = product_version.starts_with("1."); + let mut properties = BTreeMap::new(); // Core Properties // According to https://cwiki.apache.org/confluence/display/NIFI/Migration+Guidance#MigrationGuidance-Migratingto2.0.0-M1 // The nifi.flow.configuration.file property in nifi.properties must be changed to reference // "flow.json.gz" instead of "flow.xml.gz" - // TODO: Remove once we dropped support for all 1.x.x versions - let flow_file_name = if product_version.starts_with("1.") { + let flow_file_name = if is_nifi_1 { "flow.xml.gz" } else { "flow.json.gz" @@ -250,7 +257,10 @@ pub fn build_nifi_properties( // The ID of the cluster-wide state provider. This will be ignored if NiFi is not clustered but must be populated if running in a cluster. properties.insert( "nifi.state.management.provider.cluster".to_string(), - "zk-provider".to_string(), + match spec.cluster_config.clustering_backend { + v1alpha1::NifiClusteringBackend::ZooKeeper { .. } => "zk-provider".to_string(), + v1alpha1::NifiClusteringBackend::Kubernetes { .. 
} => "kubernetes-provider".to_string(), + }, ); // Specifies whether or not this instance of NiFi should run an embedded ZooKeeper server properties.insert( @@ -559,18 +569,41 @@ pub fn build_nifi_properties( "".to_string(), ); - // zookeeper properties, used for cluster management - // this will be replaced via a container command script - properties.insert( - "nifi.zookeeper.connect.string".to_string(), - "${env:ZOOKEEPER_HOSTS}".to_string(), - ); - - // this will be replaced via a container command script - properties.insert( - "nifi.zookeeper.root.node".to_string(), - "${env:ZOOKEEPER_CHROOT}".to_string(), - ); + match spec.cluster_config.clustering_backend { + v1alpha1::NifiClusteringBackend::ZooKeeper { .. } => { + properties.insert( + "nifi.cluster.leader.election.implementation".to_string(), + "CuratorLeaderElectionManager".to_string(), + ); + + // this will be replaced via a container command script + properties.insert( + "nifi.zookeeper.connect.string".to_string(), + "${env:ZOOKEEPER_HOSTS}".to_string(), + ); + + // this will be replaced via a container command script + properties.insert( + "nifi.zookeeper.root.node".to_string(), + "${env:ZOOKEEPER_CHROOT}".to_string(), + ); + } + + v1alpha1::NifiClusteringBackend::Kubernetes {} => { + ensure!(!is_nifi_1, Nifi1RequiresZookeeperSnafu); + + properties.insert( + "nifi.cluster.leader.election.implementation".to_string(), + "KubernetesLeaderElectionManager".to_string(), + ); + + // this will be replaced via a container command script + properties.insert( + "nifi.cluster.leader.election.kubernetes.lease.prefix".to_string(), + "${env:STACKLET_NAME}".to_string(), + ); + } + } // override with config overrides properties.extend(overrides); @@ -578,28 +611,42 @@ pub fn build_nifi_properties( Ok(format_properties(properties)) } -pub fn build_state_management_xml() -> String { +pub fn build_state_management_xml(clustering_backend: &NifiClusteringBackend) -> String { + // Inert providers are ignored by NiFi itself, 
but templating still fails if they refer to invalid environment variables, + // so only include the actually used provider. + let cluster_provider = match clustering_backend { + NifiClusteringBackend::ZooKeeper { .. } => { + r#" + zk-provider + org.apache.nifi.controller.state.providers.zookeeper.ZooKeeperStateProvider + ${env:ZOOKEEPER_HOSTS} + ${env:ZOOKEEPER_CHROOT} + 10 seconds + Open + "# + } + NifiClusteringBackend::Kubernetes {} => { + r#" + kubernetes-provider + org.apache.nifi.kubernetes.state.provider.KubernetesConfigMapStateProvider + ${env:STACKLET_NAME} + "# + } + }; format!( - " + r#" - local-provider + local-provider org.apache.nifi.controller.state.providers.local.WriteAheadLocalStateProvider - {} - false - 16 - 2 mins + {local_state_path} + false + 16 + 2 mins - - zk-provider - org.apache.nifi.controller.state.providers.zookeeper.ZooKeeperStateProvider - ${{env:ZOOKEEPER_HOSTS}} - ${{env:ZOOKEEPER_CHROOT}} - 10 seconds - Open - - ", - &NifiRepository::State.mount_path(), + {cluster_provider} + "#, + local_state_path = NifiRepository::State.mount_path(), ) } diff --git a/rust/operator-binary/src/controller.rs b/rust/operator-binary/src/controller.rs index cca71af6..c9f56312 100644 --- a/rust/operator-binary/src/controller.rs +++ b/rust/operator-binary/src/controller.rs @@ -774,7 +774,10 @@ async fn build_node_rolegroup_config_map( rolegroup: rolegroup.clone(), })?, ) - .add_data(NIFI_STATE_MANAGEMENT_XML, build_state_management_xml()) + .add_data( + NIFI_STATE_MANAGEMENT_XML, + build_state_management_xml(&nifi.spec.cluster_config.clustering_backend), + ) .add_data( LOGIN_IDENTITY_PROVIDERS_XML_FILE_NAME, login_identity_provider_xml, @@ -873,7 +876,7 @@ async fn build_node_rolegroup_statefulset( nifi_auth_config: &NifiAuthenticationConfig, rolling_update_supported: bool, replicas: Option, - sa_name: &str, + service_account_name: &str, ) -> Result { tracing::debug!("Building statefulset"); let role_group = 
role.role_groups.get(&rolegroup_ref.role_group); @@ -909,18 +912,36 @@ async fn build_node_rolegroup_statefulset( env_vars.push(EnvVar { name: "CONTAINERDEBUG_LOG_DIRECTORY".to_string(), value: Some(format!("{STACKABLE_LOG_DIR}/containerdebug")), - value_from: None, + ..Default::default() }); - env_vars.push(zookeeper_env_var( - "ZOOKEEPER_HOSTS", - &nifi.spec.cluster_config.zookeeper_config_map_name, - )); + env_vars.push(EnvVar { + name: "STACKLET_NAME".to_string(), + value: Some(nifi.name_unchecked().to_string()), + ..Default::default() + }); - env_vars.push(zookeeper_env_var( - "ZOOKEEPER_CHROOT", - &nifi.spec.cluster_config.zookeeper_config_map_name, - )); + match &nifi.spec.cluster_config.clustering_backend { + v1alpha1::NifiClusteringBackend::ZooKeeper { + zookeeper_config_map_name, + } => { + let zookeeper_env_var = |name: &str| EnvVar { + name: name.to_string(), + value_from: Some(EnvVarSource { + config_map_key_ref: Some(ConfigMapKeySelector { + name: zookeeper_config_map_name.to_string(), + key: name.to_string(), + ..ConfigMapKeySelector::default() + }), + ..EnvVarSource::default() + }), + ..EnvVar::default() + }; + env_vars.push(zookeeper_env_var("ZOOKEEPER_HOSTS")); + env_vars.push(zookeeper_env_var("ZOOKEEPER_CHROOT")); + } + v1alpha1::NifiClusteringBackend::Kubernetes {} => {} + } if let NifiAuthenticationConfig::Oidc { oidc, .. 
} = nifi_auth_config { env_vars.extend(AuthenticationProvider::client_credentials_env_var_mounts( @@ -965,11 +986,17 @@ async fn build_node_rolegroup_statefulset( format!("echo Importing {KEYSTORE_NIFI_CONTAINER_MOUNT}/keystore.p12 to {STACKABLE_SERVER_TLS_DIR}/keystore.p12"), format!("cp {KEYSTORE_NIFI_CONTAINER_MOUNT}/keystore.p12 {STACKABLE_SERVER_TLS_DIR}/keystore.p12"), format!("echo Importing {KEYSTORE_NIFI_CONTAINER_MOUNT}/truststore.p12 to {STACKABLE_SERVER_TLS_DIR}/truststore.p12"), + // secret-operator currently encrypts keystores with RC2, which NiFi is unable to read: https://github.com/stackabletech/nifi-operator/pull/510 + // As a workaround, reencrypt the keystore with keytool. + // keytool crashes if the target truststore already exists (covering up the true error + // if the init container fails later on in the script), so delete it first. + format!("test ! -e {STACKABLE_SERVER_TLS_DIR}/truststore.p12 || rm {STACKABLE_SERVER_TLS_DIR}/truststore.p12"), format!("keytool -importkeystore -srckeystore {KEYSTORE_NIFI_CONTAINER_MOUNT}/truststore.p12 -destkeystore {STACKABLE_SERVER_TLS_DIR}/truststore.p12 -srcstorepass {STACKABLE_TLS_STORE_PASSWORD} -deststorepass {STACKABLE_TLS_STORE_PASSWORD}"), + "echo Replacing config directory".to_string(), "cp /conf/* /stackable/nifi/conf".to_string(), - "ln -sf /stackable/log_config/logback.xml /stackable/nifi/conf/logback.xml".to_string(), - format!("export NODE_ADDRESS=\"{node_address}\""), + "test -L /stackable/nifi/conf/logback.xml || ln -sf /stackable/log_config/logback.xml /stackable/nifi/conf/logback.xml".to_string(), + format!(r#"export NODE_ADDRESS="{node_address}""#), ]); // This commands needs to go first, as they might set env variables needed by the templating @@ -1315,7 +1342,7 @@ async fn build_node_rolegroup_statefulset( ..Volume::default() }) .context(AddVolumeSnafu)? 
- .service_account_name(sa_name) + .service_account_name(service_account_name) .security_context( PodSecurityContextBuilder::new() .run_as_user(NIFI_UID) @@ -1427,22 +1454,6 @@ fn external_node_port(nifi_service: &Service) -> Result { port.node_port.with_context(|| ExternalPortSnafu {}) } -/// Used for the `ZOOKEEPER_HOSTS` and `ZOOKEEPER_CHROOT` env vars. -fn zookeeper_env_var(name: &str, configmap_name: &str) -> EnvVar { - EnvVar { - name: name.to_string(), - value_from: Some(EnvVarSource { - config_map_key_ref: Some(ConfigMapKeySelector { - name: configmap_name.to_string(), - key: name.to_string(), - ..ConfigMapKeySelector::default() - }), - ..EnvVarSource::default() - }), - ..EnvVar::default() - } -} - async fn get_proxy_hosts( client: &Client, nifi: &v1alpha1::NifiCluster, diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index c3227e77..a4953eea 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -138,11 +138,8 @@ pub mod versioned { #[serde(skip_serializing_if = "Option::is_none")] pub vector_aggregator_config_map_name: Option, - /// NiFi requires a ZooKeeper cluster connection to run. - /// Provide the name of the ZooKeeper [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery) - /// here. When using the [Stackable operator for Apache ZooKeeper](DOCS_BASE_URL_PLACEHOLDER/zookeeper/) - /// to deploy a ZooKeeper cluster, this will simply be the name of your ZookeeperCluster resource. - pub zookeeper_config_map_name: String, + #[serde(flatten)] + pub clustering_backend: NifiClusteringBackend, /// Extra volumes similar to `.spec.volumes` on a Pod to mount into every container, this can be useful to for /// example make client certificates, keytabs or similar things available to processors. 
These volumes will be @@ -168,6 +165,25 @@ pub mod versioned { #[serde(default)] pub create_reporting_task_job: CreateReportingTaskJob, } + + // This is flattened in for backwards compatibility reasons, `zookeeper_config_map_name` already existed and used to be mandatory. + // For v1alpha2, consider migrating this to a tagged enum for consistency. + #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] + #[serde(untagged)] + pub enum NifiClusteringBackend { + #[serde(rename_all = "camelCase")] + ZooKeeper { + /// NiFi can either use ZooKeeper or Kubernetes for managing its cluster state. To use ZooKeeper, provide the name of the + /// ZooKeeper [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery) here. + /// When using the [Stackable operator for Apache ZooKeeper](DOCS_BASE_URL_PLACEHOLDER/zookeeper/) + /// to deploy a ZooKeeper cluster, this will simply be the name of your ZookeeperCluster resource. + /// + /// The Kubernetes provider will be used if this field is unset. Kubernetes is only supported for NiFi 2.x and newer, + /// NiFi 1.x requires ZooKeeper. 
+ zookeeper_config_map_name: String, + }, + Kubernetes {}, + } } impl HasStatusCondition for v1alpha1::NifiCluster { diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index e6810d8b..a3fec9f4 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use clap::Parser; +use crd::v1alpha1::NifiClusteringBackend; use futures::stream::StreamExt; use stackable_operator::{ YamlSchema, @@ -179,5 +180,10 @@ fn references_config_map( return false; }; - nifi.spec.cluster_config.zookeeper_config_map_name == config_map.name_any() + match &nifi.spec.cluster_config.clustering_backend { + NifiClusteringBackend::ZooKeeper { + zookeeper_config_map_name, + } => *zookeeper_config_map_name == config_map.name_any(), + NifiClusteringBackend::Kubernetes {} => false, + } } diff --git a/tests/templates/kuttl/smoke-zookeeperless/00-patch-ns.yaml.j2 b/tests/templates/kuttl/smoke-zookeeperless/00-patch-ns.yaml.j2 new file mode 100644 index 00000000..67185acf --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/00-patch-ns.yaml.j2 @@ -0,0 +1,9 @@ +{% if test_scenario['values']['openshift'] == 'true' %} +# see https://github.com/stackabletech/issues/issues/566 +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' + timeout: 120 +{% endif %} diff --git a/tests/templates/kuttl/smoke-zookeeperless/00-range-limit.yaml b/tests/templates/kuttl/smoke-zookeeperless/00-range-limit.yaml new file mode 100644 index 00000000..8fd02210 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/00-range-limit.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: LimitRange +metadata: + name: limit-request-ratio +spec: + limits: + - type: "Container" + maxLimitRequestRatio: + cpu: 5 + memory: 1 diff --git a/tests/templates/kuttl/smoke-zookeeperless/10-assert.yaml.j2 
b/tests/templates/kuttl/smoke-zookeeperless/10-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/10-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/smoke-zookeeperless/10-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/smoke-zookeeperless/10-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/10-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/smoke-zookeeperless/30-assert.yaml b/tests/templates/kuttl/smoke-zookeeperless/30-assert.yaml new file mode 100644 index 00000000..ae825d11 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/30-assert.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 1200 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-nifi-node-default +spec: + template: + spec: + terminationGracePeriodSeconds: 300 +status: + readyReplicas: 2 + replicas: 2 +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: test-nifi-node +status: + expectedPods: 2 + currentHealthy: 2 + disruptionsAllowed: 1 diff --git a/tests/templates/kuttl/smoke-zookeeperless/30-install-nifi.yaml.j2 b/tests/templates/kuttl/smoke-zookeeperless/30-install-nifi.yaml.j2 new file mode 100644 index 00000000..c6bb1e16 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/30-install-nifi.yaml.j2 @@ -0,0 +1,73 @@ +--- 
+apiVersion: authentication.stackable.tech/v1alpha1 +kind: AuthenticationClass +metadata: + name: simple-nifi-users +spec: + provider: + static: + userCredentialsSecret: + name: simple-nifi-admin-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: simple-nifi-admin-credentials +stringData: + admin: > + passwordWithSpecialCharacter\@<&>"' +--- +apiVersion: v1 +kind: Secret +metadata: + name: nifi-sensitive-property-key +stringData: + nifiSensitivePropsKey: mYsUp3rS3cr3tk3y +--- +apiVersion: nifi.stackable.tech/v1alpha1 +kind: NifiCluster +metadata: + name: test-nifi +spec: + image: +{% if test_scenario['values']['nifi_new'].find(",") > 0 %} + custom: "{{ test_scenario['values']['nifi_new'].split(',')[1] }}" + productVersion: "{{ test_scenario['values']['nifi_new'].split(',')[0] }}" +{% else %} + productVersion: "{{ test_scenario['values']['nifi_new'] }}" +{% endif %} + pullPolicy: IfNotPresent + clusterConfig: + listenerClass: {{ test_scenario['values']['listener-class'] }} + authentication: + - authenticationClass: simple-nifi-users + hostHeaderCheck: + allowAll: false + additionalAllowedHosts: + - example.com:1234 + sensitiveProperties: + keySecret: nifi-sensitive-property-key +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + nodes: + envOverrides: + COMMON_VAR: role-value # overridden by role group below + ROLE_VAR: role-value # only defined here at role level + configOverrides: + "nifi.properties": + "nifi.diagnostics.on.shutdown.enabled": "true" + "nifi.diagnostics.on.shutdown.verbose": "false" + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 2 + envOverrides: + COMMON_VAR: group-value # overrides role value + GROUP_VAR: group-value # only defined here at group level + configOverrides: + "nifi.properties": + "nifi.diagnostics.on.shutdown.enabled": "false" + 
"nifi.diagnostics.on.shutdown.max.filecount": "20" diff --git a/tests/templates/kuttl/smoke-zookeeperless/31-assert.yaml.j2 b/tests/templates/kuttl/smoke-zookeeperless/31-assert.yaml.j2 new file mode 100644 index 00000000..06b1dc78 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/31-assert.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 30 +commands: +- script: kubectl get cm -n $NAMESPACE test-nifi-node-default -o yaml | grep -- 'nifi.web.proxy.host=.*example.com:1234' | xargs test ! -z diff --git a/tests/templates/kuttl/smoke-zookeeperless/32-assert.yaml b/tests/templates/kuttl/smoke-zookeeperless/32-assert.yaml new file mode 100644 index 00000000..00da1613 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/32-assert.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +commands: + # + # Test envOverrides + # + - script: | + kubectl -n $NAMESPACE get sts test-nifi-node-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "nifi") | .env[] | select (.name == "COMMON_VAR" and .value == "group-value")' + kubectl -n $NAMESPACE get sts test-nifi-node-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "nifi") | .env[] | select (.name == "GROUP_VAR" and .value == "group-value")' + kubectl -n $NAMESPACE get sts test-nifi-node-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "nifi") | .env[] | select (.name == "ROLE_VAR" and .value == "role-value")' + # + # Test configOverrides + # + - script: | + kubectl -n $NAMESPACE get cm test-nifi-node-default -o yaml | yq -e '.data."nifi.properties"' | grep "nifi.diagnostics.on.shutdown.enabled=false" + kubectl -n $NAMESPACE get cm test-nifi-node-default -o yaml | yq -e '.data."nifi.properties"' | grep "nifi.diagnostics.on.shutdown.verbose=false" + kubectl -n $NAMESPACE get cm test-nifi-node-default -o yaml | yq -e '.data."nifi.properties"' | grep 
"nifi.diagnostics.on.shutdown.max.filecount=20" diff --git a/tests/templates/kuttl/smoke-zookeeperless/33-assert.yaml b/tests/templates/kuttl/smoke-zookeeperless/33-assert.yaml new file mode 100644 index 00000000..b2a98140 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/33-assert.yaml @@ -0,0 +1,7 @@ +--- +# This test checks if the containerdebug-state.json file is present and valid +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +commands: + - script: kubectl exec -n $NAMESPACE --container nifi test-nifi-node-default-0 -- cat /stackable/log/containerdebug-state.json | jq --exit-status '"valid JSON"' diff --git a/tests/templates/kuttl/smoke-zookeeperless/40-assert.yaml b/tests/templates/kuttl/smoke-zookeeperless/40-assert.yaml new file mode 100644 index 00000000..88f50b77 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/40-assert.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 1200 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-nifi-node-default +status: + readyReplicas: 3 + replicas: 3 diff --git a/tests/templates/kuttl/smoke-zookeeperless/40-scale-up-nifi.yaml.j2 b/tests/templates/kuttl/smoke-zookeeperless/40-scale-up-nifi.yaml.j2 new file mode 100644 index 00000000..987b0745 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/40-scale-up-nifi.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: >- + kubectl --namespace $NAMESPACE + patch nificlusters.nifi.stackable.tech test-nifi + --type=merge --patch '{"spec":{"nodes": {"roleGroups": {"default": {"replicas": 3}}}}}' diff --git a/tests/templates/kuttl/smoke-zookeeperless/50-assert.yaml b/tests/templates/kuttl/smoke-zookeeperless/50-assert.yaml new file mode 100644 index 00000000..d511ff46 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/50-assert.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +--- 
+apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-nifi +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/smoke-zookeeperless/50-install-test-nifi.yaml b/tests/templates/kuttl/smoke-zookeeperless/50-install-test-nifi.yaml new file mode 100644 index 00000000..3bc67dbc --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/50-install-test-nifi.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-nifi + labels: + app: test-nifi +spec: + replicas: 1 + selector: + matchLabels: + app: test-nifi + template: + metadata: + labels: + app: test-nifi + spec: + containers: + - name: test-nifi + image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev + command: ["sleep", "infinity"] + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "128Mi" + cpu: "400m" diff --git a/tests/templates/kuttl/smoke-zookeeperless/60-assert.yaml.j2 b/tests/templates/kuttl/smoke-zookeeperless/60-assert.yaml.j2 new file mode 100644 index 00000000..b62dc7c6 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/60-assert.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +commands: + - script: kubectl exec -n $NAMESPACE test-nifi-0 -- python /tmp/test_nifi.py -u admin -p 'passwordWithSpecialCharacter\@<&>"'"'" -n $NAMESPACE -c 3 +{% if test_scenario['values']['nifi_new'].startswith('1.') %} + - script: kubectl exec -n $NAMESPACE test-nifi-0 -- python /tmp/test_nifi_metrics.py -n $NAMESPACE +{% endif %} diff --git a/tests/templates/kuttl/smoke-zookeeperless/60-prepare-test-nifi.yaml b/tests/templates/kuttl/smoke-zookeeperless/60-prepare-test-nifi.yaml new file mode 100644 index 00000000..c3ac9b79 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/60-prepare-test-nifi.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl cp -n $NAMESPACE ./test_nifi_metrics.py test-nifi-0:/tmp 
+ - script: kubectl cp -n $NAMESPACE ./test_nifi.py test-nifi-0:/tmp + - script: kubectl cp -n $NAMESPACE ./cacert.pem test-nifi-0:/tmp diff --git a/tests/templates/kuttl/smoke-zookeeperless/70-assert.yaml b/tests/templates/kuttl/smoke-zookeeperless/70-assert.yaml new file mode 100644 index 00000000..29b0ff4d --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/70-assert.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: v1 +kind: Event +reason: Started +source: + component: kubelet +involvedObject: + apiVersion: v1 + kind: Pod + name: test-nifi-node-default-0 +--- +apiVersion: v1 +kind: Event +reason: Started +source: + component: kubelet +involvedObject: + apiVersion: v1 + kind: Pod + name: test-nifi-node-default-1 +--- +apiVersion: v1 +kind: Event +reason: Started +source: + component: kubelet +involvedObject: + apiVersion: v1 + kind: Pod + name: test-nifi-node-default-2 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-nifi-node-default +status: + readyReplicas: 3 + replicas: 3 diff --git a/tests/templates/kuttl/smoke-zookeeperless/70-enable-anonymous.yaml.j2 b/tests/templates/kuttl/smoke-zookeeperless/70-enable-anonymous.yaml.j2 new file mode 100644 index 00000000..f39ce021 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/70-enable-anonymous.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: >- + kubectl --namespace $NAMESPACE + patch nificlusters.nifi.stackable.tech test-nifi + --type=merge --patch '{"spec":{"config": {"authentication": {"allowAnonymousAccess": true}}}}' + - command: kubectl rollout restart statefulset test-nifi-node-default --namespace $NAMESPACE diff --git a/tests/templates/kuttl/smoke-zookeeperless/cacert.pem b/tests/templates/kuttl/smoke-zookeeperless/cacert.pem new file mode 100644 index 00000000..ebe73910 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/cacert.pem @@ -0,0 +1,20 @@ 
+-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIJAJ8/0entaUgnMA0GCSqGSIb3DQEBCwUAMCYxJDAiBgNV +BAMMG3NlY3JldC1vcGVyYXRvciBzZWxmLXNpZ25lZDAeFw0yMjAxMTIxNDU3NDVa +Fw0yNDAxMTIxNTAyNDVaMCYxJDAiBgNVBAMMG3NlY3JldC1vcGVyYXRvciBzZWxm +LXNpZ25lZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALLKNGhq5gE+ +mL9zFCLqtc22CLk8BSbjesjUEhBK3kxDvFDa2ou5atH0eUFjtOSszay2oBrCTVWK +wZBsdUkL0HkW/wq9A8EUkQ8EownXnsxpI61CLNGLPpBZc+CRHhyWDD6BqwGvEHEv +W546mh6k49//7zCiYfTK9/LCKBCFdDV6Sb7mNJ8HbNUj54uwC6iOgH25OCRDh4Bt +zXoSrV9GLAm6AM25ZFo+ONOUBMtv7pavaR0CFMnAixl2NKV2wyLBYAYnJgdJFzGD +8mP6HwuR7e2g7PkcyC01EnX4iOIuuKHT/Xl9ynut4nHI7g6popotgashrQ5Jf8MS +Kf98O12LzSMCAwEAAaOBhTCBgjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRf +U9OxCBwCqYiUjWqY05sz3a6cmjBABgNVHSMEOTA3oSqkKDAmMSQwIgYDVQQDDBtz +ZWNyZXQtb3BlcmF0b3Igc2VsZi1zaWduZWSCCQCfP9Hp7WlIJzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQELBQADggEBAA8Flk1XOb1pH33Mbie5ronP2xw/xf6t +Ox3PBEZ+5/jSPdIwoSaRp9JoP0L9Rg68jzcl5QMa4pOYWe+C1q8aZP0tjfq1eJfO +UD5ik2DQgEuoF1ELgW1xoM38vkd8wgE711swDHK2zAsOudSzO4XZ4rQ6kaXXtoej +2kFhxDYcC+na90LdkJM0kAqrjxlFaP7WgUK+HA2iN00CFSOI9FVdppLtootbcb+y ++WfXxM7gA9Exg4f2vKGVx7UxB/k4AbPvogBQZvK8VoAQocAhWrw7o2rqAesAw6JD +WwQjM69TlEfbHYXtTfMbi01Wi5TtVhFCjyXK6KDsqSgU+9McExIy70k= +-----END CERTIFICATE----- diff --git a/tests/templates/kuttl/smoke-zookeeperless/test_nifi.py b/tests/templates/kuttl/smoke-zookeeperless/test_nifi.py new file mode 100755 index 00000000..4280424a --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/test_nifi.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +import requests +import json +import argparse +import urllib3 +from time import sleep + + +def get_token(nifi_host, username, password): + nifi_headers = { + "content-type": "application/x-www-form-urlencoded; charset=UTF-8", + } + data = {"username": username, "password": password} + + # TODO: handle actual errors when connecting properly + nifi_url = nifi_host + "/nifi-api/access/token" + response = requests.post( + nifi_url, headers=nifi_headers, data=data, 
verify=False + ) # , cert='./tmp/cacert.pem') + + if response.ok: + nifi_token = response.content.decode("utf-8") + return "Bearer " + nifi_token + else: + print(f"Failed to get token: {response.status_code}: {response.content}") + exit(-1) + + +if __name__ == "__main__": + # Construct an argument parser + all_args = argparse.ArgumentParser() + + # Add arguments to the parser + all_args.add_argument("-u", "--user", required=True, help="Username to connect as") + all_args.add_argument( + "-p", "--password", required=True, help="Password for the user" + ) + all_args.add_argument( + "-n", "--namespace", required=True, help="Namespace the test is running in" + ) + all_args.add_argument( + "-c", "--count", required=True, help="The expected number of Nodes" + ) + args = vars(all_args.parse_args()) + + # disable warnings as we have specified non-verified https connections + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + + host = f"https://test-nifi-node-default-1.test-nifi-node-default.{args['namespace']}.svc.cluster.local:8443" + token = get_token(host, args["user"], args["password"]) + headers = {"Authorization": token} + node_count = int(args["count"]) + + x = 0 + while x < 15: + url = host + "/nifi-api/controller/cluster" + cluster = requests.get( + url, headers=headers, verify=False + ) # , cert='/tmp/cacert.pem') + if cluster.status_code != 200: + print("Waiting for cluster...") + else: + cluster_data = json.loads(cluster.content.decode("utf-8")) + nodes = cluster_data["cluster"]["nodes"] + if len(nodes) != node_count: + print( + f"Cluster should have {node_count} nodes at this stage, but has: {len(nodes)}" + ) + else: + connected = True + for node in nodes: + if node["status"] != "CONNECTED": + print( + f"Node {node['nodeId']} is in state {node['status']} but should have been CONNECTED" + ) + connected = False + if connected: + print("Test succeeded!") + exit(0) + print("Retrying...") + x = x + 1 + sleep(10) + + print("Test failed") + 
exit(-1) diff --git a/tests/templates/kuttl/smoke-zookeeperless/test_nifi_metrics.py b/tests/templates/kuttl/smoke-zookeeperless/test_nifi_metrics.py new file mode 100755 index 00000000..fb75a747 --- /dev/null +++ b/tests/templates/kuttl/smoke-zookeeperless/test_nifi_metrics.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +import argparse +import requests +import time +from requests.exceptions import ConnectionError + +if __name__ == "__main__": + # Construct an argument parser + all_args = argparse.ArgumentParser() + # Add arguments to the parser + all_args.add_argument( + "-m", + "--metric", + required=False, + default="nifi_amount_bytes_read", + help="The name of a certain metric to check", + ) + all_args.add_argument( + "-n", "--namespace", required=True, help="The namespace the test is running in" + ) + all_args.add_argument( + "-p", + "--port", + required=False, + default="8081", + help="The port where metrics are exposed", + ) + all_args.add_argument( + "-t", + "--timeout", + required=False, + default="120", + help="The timeout in seconds to wait for the metrics port to be opened", + ) + + args = vars(all_args.parse_args()) + metric_name = args["metric"] + namespace = args["namespace"] + port = args["port"] + timeout = int(args["timeout"]) + + url = f"http://test-nifi-node-default-0.test-nifi-node-default.{namespace}.svc.cluster.local:{port}/metrics" + + # wait for 'timeout' seconds + t_end = time.time() + timeout + while time.time() < t_end: + try: + response = requests.post(url) + response.raise_for_status() + if metric_name in response.text: + print("Test metrics succeeded!") + exit(0) + else: + print( + f"Could not find metric [{metric_name}] in response:\n {response.text}" + ) + time.sleep(timeout) + except ConnectionError: + # NewConnectionError is expected until metrics are available + time.sleep(10) + + exit(-1) diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index d83e0751..93d0c034 100644 --- a/tests/test-definition.yaml +++ 
b/tests/test-definition.yaml @@ -63,6 +63,12 @@ tests: - zookeeper - listener-class - openshift + # Merge into smoke once we drop support for NiFi 1.x + - name: smoke-zookeeperless + dimensions: + - nifi_new + - listener-class + - openshift - name: resources dimensions: - nifi