From 1dce038c5b2c1835e2a1d0ae8ef14a075a3b2f55 Mon Sep 17 00:00:00 2001 From: Paul Maidment Date: Wed, 1 Jan 2025 09:44:33 +0200 Subject: [PATCH] MGMT-19258: Identify and copy shared code MGMT-19258: Copied context sensitive funcs into a context MGMT-19258: Ensure that all context sensitive code is in test context --- Makefile | 6 +- .../agent_based_installer_client_test.go | 15 +- subsystem/ams_subscriptions_test.go | 59 +- subsystem/authz_test.go | 117 +- subsystem/cluster_default_config_test.go | 9 +- subsystem/cluster_test.go | 1287 +++++++++-------- subsystem/cluster_v2_test.go | 249 ++-- subsystem/day2_cluster_test.go | 443 +++--- subsystem/events_test.go | 5 +- subsystem/feature_support_levels_test.go | 31 +- subsystem/host_test.go | 253 ++-- subsystem/host_v2_test.go | 107 +- subsystem/image_test.go | 11 +- subsystem/infra_env_test.go | 75 +- subsystem/ipv6_test.go | 23 +- subsystem/kubeapi/kubeapi_suite_test.go | 180 +++ subsystem/{ => kubeapi}/kubeapi_test.go | 1014 ++++++------- subsystem/manifests_test.go | 67 +- subsystem/metrics_test.go | 185 +-- subsystem/operators_test.go | 95 +- subsystem/spec_test.go | 3 +- subsystem/subsystem_suite_test.go | 67 +- subsystem/utils_test.go | 753 ---------- .../utils_test/subsystem_test_context.go | 777 ++++++++++ subsystem/utils_test/utils.go | 245 ++++ subsystem/utils_test/wiremock_stubs.go | 887 ++++++++++++ subsystem/versions_test.go | 9 +- 27 files changed, 4168 insertions(+), 2804 deletions(-) create mode 100644 subsystem/kubeapi/kubeapi_suite_test.go rename subsystem/{ => kubeapi}/kubeapi_test.go (87%) delete mode 100644 subsystem/utils_test.go create mode 100644 subsystem/utils_test/subsystem_test_context.go create mode 100644 subsystem/utils_test/utils.go create mode 100644 subsystem/utils_test/wiremock_stubs.go diff --git a/Makefile b/Makefile index b3c31888569..f35b6ecc768 100644 --- a/Makefile +++ b/Makefile @@ -448,10 +448,10 @@ deploy-dev-infra: create-hub-cluster ######## test: - $(MAKE) 
_run_subsystem_test AUTH_TYPE=rhsso ENABLE_ORG_TENANCY=true ENABLE_ORG_BASED_FEATURE_GATES=true + $(MAKE) _run_subsystem_test AUTH_TYPE=rhsso ENABLE_ORG_TENANCY=true ENABLE_ORG_BASED_FEATURE_GATES=true TEST="$(or $(TEST),'github.com/openshift/assisted-service/subsystem')" test-kube-api: - $(MAKE) _run_subsystem_test AUTH_TYPE=local FOCUS="$(or ${FOCUS},kube-api)" + $(MAKE) _run_subsystem_test AUTH_TYPE=local TEST="$(or $(TEST),'github.com/openshift/assisted-service/subsystem/kubeapi')" # Alias for test subsystem-test: test @@ -490,7 +490,7 @@ _run_subsystem_test: TEST_TOKEN_UNALLOWED="$(shell cat $(BUILD_FOLDER)/auth-tokenUnallowedString)" \ TEST_TOKEN_EDITOR="$(shell cat $(BUILD_FOLDER)/auth-tokenClusterEditor)" \ RELEASE_SOURCES='$(or ${RELEASE_SOURCES},${DEFAULT_RELEASE_SOURCES})' \ - $(MAKE) _test TEST_SCENARIO=subsystem TIMEOUT=120m TEST="$(or $(TEST),./subsystem/...)" + $(MAKE) _test TEST_SCENARIO=subsystem TIMEOUT=120m enable-kube-api-for-subsystem: $(BUILD_FOLDER) $(MAKE) deploy-service-requirements AUTH_TYPE=local ENABLE_KUBE_API=true ALLOW_CONVERGED_FLOW=true ISO_IMAGE_TYPE=minimal-iso diff --git a/subsystem/agent_based_installer_client_test.go b/subsystem/agent_based_installer_client_test.go index 9bbfbd5be2b..8d481ea6cd8 100644 --- a/subsystem/agent_based_installer_client_test.go +++ b/subsystem/agent_based_installer_client_test.go @@ -8,18 +8,19 @@ import ( "github.com/openshift/assisted-service/cmd/agentbasedinstaller" "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/internal/network" + "github.com/openshift/assisted-service/subsystem/utils_test" ) -// Note: userBMClient is used because subsystems defaults to use "rhsso" as AUTH_TYPE. +// Note: utils_test.TestContext.UserBMClient is used because subsystems defaults to use "rhsso" as AUTH_TYPE. // The ephermeral installer environment will use the "none" AUTH_TYPE at the start, and // a pre-generated infra-env-id will be used when creating the infra-env. 
// A new authentication scheme suited to the agent-based installer will be implemented -// in the future and userBMClient should be replaced at that time. +// in the future and utils_test.TestContext.UserBMClient should be replaced at that time. var _ = Describe("RegisterClusterAndInfraEnv", func() { ctx := context.Background() It("good flow", func() { - modelCluster, registerClusterErr := agentbasedinstaller.RegisterCluster(ctx, log, userBMClient, pullSecret, + modelCluster, registerClusterErr := agentbasedinstaller.RegisterCluster(ctx, log, utils_test.TestContext.UserBMClient, pullSecret, "../docs/hive-integration/crds/clusterDeployment.yaml", "../docs/hive-integration/crds/agentClusterInstall.yaml", "../docs/hive-integration/crds/clusterImageSet.yaml", "") @@ -30,7 +31,7 @@ var _ = Describe("RegisterClusterAndInfraEnv", func() { Expect(modelCluster.CPUArchitecture).To(Equal("x86_64")) Expect(modelCluster.Name).To(Equal("test-cluster")) - modelInfraEnv, registerInfraEnvErr := agentbasedinstaller.RegisterInfraEnv(ctx, log, userBMClient, pullSecret, + modelInfraEnv, registerInfraEnvErr := agentbasedinstaller.RegisterInfraEnv(ctx, log, utils_test.TestContext.UserBMClient, pullSecret, modelCluster, "../docs/hive-integration/crds/infraEnv.yaml", "../docs/hive-integration/crds/nmstate.yaml", "full-iso", "") @@ -41,7 +42,7 @@ var _ = Describe("RegisterClusterAndInfraEnv", func() { }) It("InstallConfig override good flow", func() { - modelCluster, registerClusterErr := agentbasedinstaller.RegisterCluster(ctx, log, userBMClient, pullSecret, + modelCluster, registerClusterErr := agentbasedinstaller.RegisterCluster(ctx, log, utils_test.TestContext.UserBMClient, pullSecret, "../docs/hive-integration/crds/clusterDeployment.yaml", "../docs/hive-integration/crds/agentClusterInstall-with-installconfig-overrides.yaml", "../docs/hive-integration/crds/clusterImageSet.yaml", "") @@ -53,7 +54,7 @@ var _ = Describe("RegisterClusterAndInfraEnv", func() { 
Expect(modelCluster.InstallConfigOverrides).To(Equal(`{"fips": true}`)) Expect(modelCluster.Name).To(Equal("test-cluster")) - modelInfraEnv, registerInfraEnvErr := agentbasedinstaller.RegisterInfraEnv(ctx, log, userBMClient, pullSecret, + modelInfraEnv, registerInfraEnvErr := agentbasedinstaller.RegisterInfraEnv(ctx, log, utils_test.TestContext.UserBMClient, pullSecret, modelCluster, "../docs/hive-integration/crds/infraEnv.yaml", "../docs/hive-integration/crds/nmstate.yaml", "full-iso", "") @@ -64,7 +65,7 @@ var _ = Describe("RegisterClusterAndInfraEnv", func() { }) It("missing one of the ZTP manifests", func() { - modelCluster, registerClusterErr := agentbasedinstaller.RegisterCluster(ctx, log, userBMClient, pullSecret, + modelCluster, registerClusterErr := agentbasedinstaller.RegisterCluster(ctx, log, utils_test.TestContext.UserBMClient, pullSecret, "file-does-not-exist", "../docs/hive-integration/crds/agentClusterInstall.yaml", "../docs/hive-integration/crds/clusterImageSet.yaml", "") diff --git a/subsystem/ams_subscriptions_test.go b/subsystem/ams_subscriptions_test.go index 649788cdc38..980070415f4 100644 --- a/subsystem/ams_subscriptions_test.go +++ b/subsystem/ams_subscriptions_test.go @@ -11,6 +11,7 @@ import ( "github.com/openshift/assisted-service/models" "github.com/openshift/assisted-service/pkg/auth" "github.com/openshift/assisted-service/pkg/ocm" + "github.com/openshift/assisted-service/subsystem/utils_test" "k8s.io/apimachinery/pkg/util/wait" ) @@ -43,10 +44,10 @@ var _ = Describe("test AMS subscriptions", func() { It("happy flow", func() { - clusterID, err := registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err := utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) - cc := getCommonCluster(ctx, clusterID) + cc := utils_test.TestContext.GetCommonCluster(ctx, clusterID) 
Expect(cc.AmsSubscriptionID).To(Equal(FakeSubscriptionID)) }) @@ -59,7 +60,7 @@ var _ = Describe("test AMS subscriptions", func() { }) By("register cluster", func() { - _, err := registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + _, err := utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).To(HaveOccurred()) }) @@ -69,7 +70,7 @@ var _ = Describe("test AMS subscriptions", func() { }) By("register cluster", func() { - _, err := registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + _, err := utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).To(HaveOccurred()) }) }) @@ -84,7 +85,7 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription (in 'reserved' status)", func() { - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) @@ -98,7 +99,7 @@ var _ = Describe("test AMS subscriptions", func() { }) By("delete 'reserved' subscription", func() { - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) }) }) @@ -110,7 +111,7 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription", func() { - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) @@ 
-131,7 +132,7 @@ var _ = Describe("test AMS subscriptions", func() { By("delete subscription", func() { // don't delete 'active' subscription // we can't really check that because it is done in an external dependency (AMS) so we just check there are no errors in the flow - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).ToNot(HaveOccurred()) }) }) @@ -143,7 +144,7 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription", func() { - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) @@ -154,7 +155,7 @@ var _ = Describe("test AMS subscriptions", func() { }) By("delete subscription", func() { - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) }) @@ -164,7 +165,7 @@ var _ = Describe("test AMS subscriptions", func() { }) By("delete subscription", func() { - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) }) }) @@ -176,7 +177,7 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription", func() { - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", 
pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) @@ -187,7 +188,7 @@ var _ = Describe("test AMS subscriptions", func() { }) By("delete subscription", func() { - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) }) @@ -197,7 +198,7 @@ var _ = Describe("test AMS subscriptions", func() { }) By("delete subscription", func() { - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) }) }) @@ -211,14 +212,14 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription", func() { - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) By("update subscription's display name", func() { newClusterName := "ams-cluster-new-name" - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ Name: &newClusterName, @@ -235,7 +236,7 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription", func() { - clusterID, err = registerCluster(ctx, 
userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) @@ -247,7 +248,7 @@ var _ = Describe("test AMS subscriptions", func() { By("update subscription's display name", func() { newClusterName := "ams-cluster-new-name" - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ Name: &newClusterName, @@ -263,7 +264,7 @@ var _ = Describe("test AMS subscriptions", func() { waitForConsoleUrlUpdateInAMS := func(clusterID strfmt.UUID) { waitFunc := func(ctx context.Context) (bool, error) { - c := getCommonCluster(ctx, clusterID) + c := utils_test.TestContext.GetCommonCluster(ctx, clusterID) return c.IsAmsSubscriptionConsoleUrlSet, nil } err := wait.PollUntilContextTimeout(ctx, pollDefaultInterval, pollDefaultTimeout, false, waitFunc) @@ -276,7 +277,7 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription", func() { - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) @@ -288,17 +289,17 @@ var _ = Describe("test AMS subscriptions", func() { }) By("update subscription with console url", func() { - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) for _, host := range c.Hosts { - updateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) } - waitForClusterState(ctx, clusterID, 
models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) - completeInstallation(agentBMClient, clusterID) + waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) + completeInstallation(utils_test.TestContext.AgentBMClient, clusterID) waitForConsoleUrlUpdateInAMS(clusterID) }) By("update subscription with status 'Active'", func() { - waitForClusterState(ctx, clusterID, models.ClusterStatusInstalled, defaultWaitForClusterStateTimeout, clusterInstallingStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusInstalled, utils_test.DefaultWaitForClusterStateTimeout, clusterInstallingStateInfo) }) }) @@ -310,7 +311,7 @@ var _ = Describe("test AMS subscriptions", func() { var err error By("create subscription", func() { - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) log.Infof("Register cluster %s", clusterID) }) @@ -323,12 +324,12 @@ var _ = Describe("test AMS subscriptions", func() { By("update subscription with openshfit (external) cluster ID", func() { infraEnvID := registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID registerHostsAndSetRoles(clusterID, *infraEnvID, minHosts, "test-cluster", "example.com") - reply, err = userBMClient.Installer.V2InstallCluster(context.Background(), &installer.V2InstallClusterParams{ClusterID: clusterID}) + reply, err = utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(context.Background(), &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := reply.GetPayload() Expect(*c.Status).Should(Equal(models.ClusterStatusPreparingForInstallation)) - generateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) 
- waitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusFailed) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) + utils_test.TestContext.WaitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusFailed) }) }) }) diff --git a/subsystem/authz_test.go b/subsystem/authz_test.go index eb3ef498608..e15c5275db4 100644 --- a/subsystem/authz_test.go +++ b/subsystem/authz_test.go @@ -17,10 +17,13 @@ import ( "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/models" "github.com/openshift/assisted-service/pkg/auth" + "github.com/openshift/assisted-service/subsystem/utils_test" ) const psTemplate = "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"%s\",\"email\":\"r@r.com\"}}}" +var db = utils_test.TestContext.GetDB() + var _ = Describe("test authorization", func() { ctx := context.Background() @@ -113,31 +116,31 @@ var _ = Describe("test authorization", func() { Skip("auth is disabled") } - userClusterID, err = registerCluster(ctx, userBMClient, "user-cluster", fmt.Sprintf(psTemplate, FakePS)) + userClusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "user-cluster", fmt.Sprintf(psTemplate, FakePS)) Expect(err).ShouldNot(HaveOccurred()) - userClusterID2, err = registerCluster(ctx, user2BMClient, "user2-cluster", fmt.Sprintf(psTemplate, FakePS2)) + userClusterID2, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.User2BMClient, "user2-cluster", fmt.Sprintf(psTemplate, FakePS2)) Expect(err).ShouldNot(HaveOccurred()) - userClusterID3, err = registerCluster(ctx, editclusterUserBMClient, "user3-cluster", fmt.Sprintf(psTemplate, FakePS3)) + userClusterID3, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.EditclusterUserBMClient, "user3-cluster", fmt.Sprintf(psTemplate, FakePS3)) Expect(err).ShouldNot(HaveOccurred()) }) Context("Ignoring 
validations", func() { It("can't ignore validations if not permitted to do so", func() { - _, err := unallowedUserBMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{}) + _, err := utils_test.TestContext.UnallowedUserBMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{}) Expect(err).Should(HaveOccurred()) }) }) Context("unallowed user", func() { It("can't list clusters", func() { - _, err := unallowedUserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) + _, err := utils_test.TestContext.UnallowedUserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) Expect(err).Should(HaveOccurred()) }) }) Context("admin user", func() { It("can get all clusters", func() { - resp, err := readOnlyAdminUserBMClient.Installer.V2ListClusters( + resp, err := utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2ListClusters( ctx, &installer.V2ListClustersParams{}) Expect(err).ShouldNot(HaveOccurred()) @@ -145,7 +148,7 @@ var _ = Describe("test authorization", func() { }) It("can't register/delete with read only admin", func() { - _, err := readOnlyAdminUserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: userClusterID}) + _, err := utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: userClusterID}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2DeregisterClusterForbidden())) }) @@ -159,31 +162,31 @@ var _ = Describe("test authorization", func() { }) It("can delete cluster", func() { - _, err := editclusterUserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: userClusterID}) + _, err := utils_test.TestContext.EditclusterUserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: userClusterID}) 
Expect(err).ShouldNot(HaveOccurred()) }) It("can update cluster", func() { - _, err := editclusterUserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ClusterID: userClusterID, + _, err := utils_test.TestContext.EditclusterUserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ClusterID: userClusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{Name: swag.String("update-test")}}) Expect(err).ShouldNot(HaveOccurred()) }) It("can update day2 cluster", func() { // Install day1 cluster - clusterId, err := registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterId, err := utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) infraEnvID := registerInfraEnv(&clusterId, models.ImageTypeMinimalIso).ID registerHostsAndSetRoles(clusterId, *infraEnvID, minHosts, "test-cluster", "example.com") setClusterAsFinalizing(ctx, clusterId) - completeInstallationAndVerify(ctx, agentBMClient, clusterId, true) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterId, true) // Get day1 cluster c, err := common.GetClusterFromDB(db, clusterId, common.SkipEagerLoading) Expect(err).ShouldNot(HaveOccurred()) // Create day2 cluster - res, err := userBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ + res, err := utils_test.TestContext.UserBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ NewImportClusterParams: &models.ImportClusterParams{ Name: swag.String("test-cluster"), APIVipDnsname: swag.String("api.test-cluster.example.com"), @@ -194,7 +197,7 @@ var _ = Describe("test authorization", func() { // Update day2 cluster by an editor user day2ClusterId := *res.GetPayload().ID - _, err = editclusterUserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.EditclusterUserBMClient.Installer.V2UpdateCluster(ctx, 
&installer.V2UpdateClusterParams{ ClusterID: day2ClusterId, ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVipDNSName: swag.String("some-dns-name"), @@ -206,35 +209,35 @@ var _ = Describe("test authorization", func() { Context("regular user", func() { It("can get owned cluster", func() { - _, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID}) Expect(err).ShouldNot(HaveOccurred()) }) It("can't get not owned cluster", func() { - _, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID2}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID2}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetClusterNotFound())) }) It("can delete owned cluster", func() { - _, err := userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: userClusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: userClusterID}) Expect(err).ShouldNot(HaveOccurred()) }) It("can get owned infra-env", func() { infraEnvID := registerInfraEnv(&userClusterID, models.ImageTypeMinimalIso).ID - _, err := userBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID}) + _, err := utils_test.TestContext.UserBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID}) Expect(err).ShouldNot(HaveOccurred()) }) It("can't get not owned infra-env", func() { - request, err := user2BMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ + request, err := utils_test.TestContext.User2BMClient.Installer.RegisterInfraEnv(context.Background(), 
&installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env-2"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(fmt.Sprintf(psTemplate, FakePS2)), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeMinimalIso, ClusterID: &userClusterID2, }, @@ -242,20 +245,20 @@ var _ = Describe("test authorization", func() { Expect(err).NotTo(HaveOccurred()) infraEnvID2 := request.GetPayload().ID - _, err = userBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID2}) + _, err = utils_test.TestContext.UserBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID2}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewGetInfraEnvNotFound())) }) It("can't update not owned cluster", func() { - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ClusterID: userClusterID2, + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ClusterID: userClusterID2, ClusterUpdateParams: &models.V2ClusterUpdateParams{Name: swag.String("update-test")}}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterNotFound())) }) It("can't update not owned cluster, can only read cluster", func() { - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ClusterID: userClusterID3, + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ClusterID: userClusterID3, ClusterUpdateParams: &models.V2ClusterUpdateParams{Name: swag.String("update-test")}}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterForbidden())) @@ -263,7 +266,7 @@ var _ = Describe("test authorization", func() { It("can't get 
non-existent infra-env", func() { infraEnvID := strfmt.UUID(uuid.New().String()) - _, err := userBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: infraEnvID}) + _, err := utils_test.TestContext.UserBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: infraEnvID}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewGetInfraEnvNotFound())) }) @@ -271,29 +274,29 @@ var _ = Describe("test authorization", func() { Context("agent", func() { It("can get owned cluster", func() { - _, err := agentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID}) + _, err := utils_test.TestContext.AgentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID}) Expect(err).ShouldNot(HaveOccurred()) }) It("can't get not owned cluster", func() { - _, err := agentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID2}) + _, err := utils_test.TestContext.AgentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: userClusterID2}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetClusterNotFound())) }) It("can get owned infra-env", func() { infraEnvID := registerInfraEnv(&userClusterID, models.ImageTypeMinimalIso).ID - _, err := agentBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID}) + _, err := utils_test.TestContext.AgentBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID}) Expect(err).ShouldNot(HaveOccurred()) }) It("can't get not owned infra-env", func() { - request, err := user2BMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ + request, err := utils_test.TestContext.User2BMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ 
Name: swag.String("test-infra-env-agent-2"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(fmt.Sprintf(psTemplate, FakePS2)), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeMinimalIso, ClusterID: &userClusterID2, }, @@ -301,7 +304,7 @@ var _ = Describe("test authorization", func() { Expect(err).NotTo(HaveOccurred()) infraEnvID2 := request.GetPayload().ID - _, err = agentBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID2}) + _, err = utils_test.TestContext.AgentBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: *infraEnvID2}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewGetInfraEnvNotFound())) }) @@ -338,8 +341,8 @@ var _ = Describe("test authorization", func() { }) It("try to fetch ignored validations when not allowed", func() { - createCluster(userBMClient, FakePS) - _, err := userBMClient.Installer.V2GetIgnoredValidations(ctx, &installer.V2GetIgnoredValidationsParams{ + createCluster(utils_test.TestContext.UserBMClient, FakePS) + _, err := utils_test.TestContext.UserBMClient.Installer.V2GetIgnoredValidations(ctx, &installer.V2GetIgnoredValidationsParams{ ClusterID: *cluster.ID, }) Expect(err).Should(HaveOccurred()) @@ -347,8 +350,8 @@ var _ = Describe("test authorization", func() { }) It("attempt to ignore validations when allowed to ignore validations and request is valid", func() { - createCluster(user2BMClient, FakePS2) - _, err := user2BMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ + createCluster(utils_test.TestContext.User2BMClient, FakePS2) + _, err := utils_test.TestContext.User2BMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ ClusterID: *cluster.ID, IgnoredValidations: &models.IgnoredValidations{ ClusterValidationIds: "[\"dns-domain-defined\"]", @@ -356,7 +359,7 @@ var _ = 
Describe("test authorization", func() { }, }) Expect(err).ShouldNot(HaveOccurred()) - ignoredValidations, err := user2BMClient.Installer.V2GetIgnoredValidations(ctx, &installer.V2GetIgnoredValidationsParams{ + ignoredValidations, err := utils_test.TestContext.User2BMClient.Installer.V2GetIgnoredValidations(ctx, &installer.V2GetIgnoredValidationsParams{ ClusterID: *cluster.ID, }) Expect(err).ShouldNot(HaveOccurred()) @@ -365,8 +368,8 @@ var _ = Describe("test authorization", func() { }) It("attempt to ignore validations with ID's that do not exist", func() { - createCluster(user2BMClient, FakePS2) - _, err := user2BMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ + createCluster(utils_test.TestContext.User2BMClient, FakePS2) + _, err := utils_test.TestContext.User2BMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ ClusterID: *cluster.ID, IgnoredValidations: &models.IgnoredValidations{ ClusterValidationIds: "[\"all\", \"dns-domain-defined\", \"does-not-exist\"]", @@ -385,8 +388,8 @@ var _ = Describe("test authorization", func() { }) It("attempt to ignore validations when not allowed to ignore validations", func() { - createCluster(userBMClient, FakePS) - _, err := userBMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ + createCluster(utils_test.TestContext.UserBMClient, FakePS) + _, err := utils_test.TestContext.UserBMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ ClusterID: *cluster.ID, IgnoredValidations: &models.IgnoredValidations{ ClusterValidationIds: "", @@ -402,8 +405,8 @@ var _ = Describe("test authorization", func() { }) It("Attempt to ignore a host validation that is not ignorable", func() { - createCluster(user2BMClient, FakePS2) - _, err := user2BMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ + createCluster(utils_test.TestContext.User2BMClient, FakePS2) + 
_, err := utils_test.TestContext.User2BMClient.Installer.V2SetIgnoredValidations(ctx, &installer.V2SetIgnoredValidationsParams{ ClusterID: *cluster.ID, IgnoredValidations: &models.IgnoredValidations{ ClusterValidationIds: "[\"api-vips-defined\",\"ingress-vips-defined\"]", @@ -413,7 +416,7 @@ var _ = Describe("test authorization", func() { Expect(err).Should(HaveOccurred()) Expect(marshalError(err)).To(ContainSubstring("unable to ignore the following host validations (connected,has-inventory)")) Expect(marshalError(err)).To(ContainSubstring("unable to ignore the following cluster validations (api-vips-defined,ingress-vips-defined)")) - ignoredValidations, err := user2BMClient.Installer.V2GetIgnoredValidations(ctx, &installer.V2GetIgnoredValidationsParams{ + ignoredValidations, err := utils_test.TestContext.User2BMClient.Installer.V2GetIgnoredValidations(ctx, &installer.V2GetIgnoredValidationsParams{ ClusterID: *cluster.ID, }) Expect(err).ShouldNot(HaveOccurred()) @@ -422,7 +425,7 @@ var _ = Describe("test authorization", func() { }) It("allowed to register a multiarch cluster", func() { - request, err := user2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + request, err := utils_test.TestContext.User2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ CPUArchitecture: common.MultiCPUArchitecture, Name: swag.String("test-multiarch-cluster"), @@ -434,7 +437,7 @@ var _ = Describe("test authorization", func() { Expect(request.Payload.CPUArchitecture).To(Equal(common.MultiCPUArchitecture)) }) It("not allowed to register a multiarch cluster", func() { - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ CPUArchitecture: common.MultiCPUArchitecture, Name: 
swag.String("test-multiarch-cluster"), @@ -457,7 +460,7 @@ var _ = Describe("Make sure that sensitive files are accessible only by owners o BeforeEach(func() { ctx = context.Background() - cID, err := registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + cID, err := utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred()) clusterID = cID @@ -465,20 +468,20 @@ var _ = Describe("Make sure that sensitive files are accessible only by owners o registerHostsAndSetRoles(clusterID, *infraEnvID, minHosts, "test-cluster", "example.com") setClusterAsFinalizing(ctx, clusterID) - res, err := agentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) + res, err := utils_test.TestContext.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeAssignableToTypeOf(installer.NewV2UploadClusterIngressCertCreated())) }) Context("/v2/clusters/{cluster_id}/credentials", func() { It("Should not allow read-only-admins to download kubeconfig", func() { - _, err := readOnlyAdminUserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetCredentialsForbidden())) }) It("Should allow 'user role' to download kubeconfig", func() { - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) - res, err := userBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: 
clusterID}) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) + res, err := utils_test.TestContext.UserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).ToNot(HaveOccurred()) Expect(res).To(BeAssignableToTypeOf(installer.NewV2GetCredentialsOK())) }) @@ -491,13 +494,13 @@ var _ = Describe("Make sure that sensitive files are accessible only by owners o It(it, func() { file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = readOnlyAdminUserBMClient.Installer.V2DownloadClusterCredentials(ctx, &installer.V2DownloadClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}, file) + _, err = utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2DownloadClusterCredentials(ctx, &installer.V2DownloadClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}, file) Expect(err).To(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2DownloadClusterCredentialsForbidden())) }) It(it, func() { - _, err := readOnlyAdminUserBMClient.Installer.V2GetPresignedForClusterCredentials(ctx, &installer.V2GetPresignedForClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}) + _, err := utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2GetPresignedForClusterCredentials(ctx, &installer.V2GetPresignedForClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}) Expect(err).To(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetPresignedForClusterCredentialsForbidden())) }) @@ -507,12 +510,12 @@ var _ = Describe("Make sure that sensitive files are accessible only by owners o It(it, func() { file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadClusterCredentials(ctx, &installer.V2DownloadClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}, file) + _, err = 
utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterCredentials(ctx, &installer.V2DownloadClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}, file) Expect(err).ToNot(HaveOccurred()) }) it = fmt.Sprintf("Should allow cluster users to download '%v' via downloads/files-presigned endpoint", fileName) It(it, func() { - _, err := userBMClient.Installer.V2GetPresignedForClusterCredentials(ctx, &installer.V2GetPresignedForClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2GetPresignedForClusterCredentials(ctx, &installer.V2GetPresignedForClusterCredentialsParams{ClusterID: clusterID, FileName: fileName}) Expect(err).NotTo(BeAssignableToTypeOf(installer.NewV2GetPresignedForClusterCredentialsForbidden())) }) @@ -527,25 +530,25 @@ var _ = Describe("Cluster credentials should be accessed only by cluster owner", BeforeEach(func() { ctx = context.Background() - cID, err := registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + cID, err := utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) clusterID = cID infraEnvID = registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID registerHostsAndSetRoles(clusterID, *infraEnvID, minHosts, "test-cluster", "example.com") setClusterAsFinalizing(ctx, clusterID) - res, err := agentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) + res, err := utils_test.TestContext.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeAssignableToTypeOf(installer.NewV2UploadClusterIngressCertCreated())) - completeInstallationAndVerify(ctx, agentBMClient, 
clusterID, true) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) }) It("Should not allow read-only-admins to get credentials", func() { - _, err := readOnlyAdminUserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetCredentialsForbidden())) }) It("Should allow cluster user to get credentials", func() { - _, err := userBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/subsystem/cluster_default_config_test.go b/subsystem/cluster_default_config_test.go index c2878fdd9f6..699f9c28b71 100644 --- a/subsystem/cluster_default_config_test.go +++ b/subsystem/cluster_default_config_test.go @@ -7,17 +7,18 @@ import ( . 
"github.com/onsi/gomega" "github.com/openshift/assisted-service/client/installer" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("V2GetClusterDefaultConfig", func() { It("InactiveDeletionHours", func() { - res, err := userBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), &installer.V2GetClusterDefaultConfigParams{}) + res, err := utils_test.TestContext.UserBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), &installer.V2GetClusterDefaultConfigParams{}) Expect(err).NotTo(HaveOccurred()) Expect(res.GetPayload().InactiveDeletionHours).To(Equal(int64(Options.DeregisterInactiveAfter.Hours()))) }) It("Default IPv4 networks", func() { - res, err := userBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), &installer.V2GetClusterDefaultConfigParams{}) + res, err := utils_test.TestContext.UserBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), &installer.V2GetClusterDefaultConfigParams{}) Expect(err).NotTo(HaveOccurred()) Expect(res.GetPayload().ClusterNetworksIPV4[0].Cidr).To(Equal(models.Subnet("10.128.0.0/14"))) @@ -25,7 +26,7 @@ var _ = Describe("V2GetClusterDefaultConfig", func() { Expect(res.GetPayload().ServiceNetworksIPV4[0].Cidr).To(Equal(models.Subnet("172.30.0.0/16"))) }) It("Default dual-stack networks", func() { - res, err := userBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), &installer.V2GetClusterDefaultConfigParams{}) + res, err := utils_test.TestContext.UserBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), &installer.V2GetClusterDefaultConfigParams{}) Expect(err).NotTo(HaveOccurred()) Expect(res.GetPayload().ClusterNetworksDualstack[0].Cidr).To(Equal(models.Subnet("10.128.0.0/14"))) @@ -38,7 +39,7 @@ var _ = Describe("V2GetClusterDefaultConfig", func() { }) It("Forbidden hostnames", func() { - res, err := userBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), 
&installer.V2GetClusterDefaultConfigParams{}) + res, err := utils_test.TestContext.UserBMClient.Installer.V2GetClusterDefaultConfig(context.Background(), &installer.V2GetClusterDefaultConfigParams{}) Expect(err).NotTo(HaveOccurred()) Expect(len(res.GetPayload().ForbiddenHostnames)).To(Equal(6)) Expect(res.GetPayload().ForbiddenHostnames[0]).To(Equal("localhost")) diff --git a/subsystem/cluster_test.go b/subsystem/cluster_test.go index 75563bd349c..16dd0ef7032 100644 --- a/subsystem/cluster_test.go +++ b/subsystem/cluster_test.go @@ -49,6 +49,7 @@ import ( "github.com/openshift/assisted-service/models" "github.com/openshift/assisted-service/pkg/auth" "github.com/openshift/assisted-service/pkg/conversions" + "github.com/openshift/assisted-service/subsystem/utils_test" "golang.org/x/sync/errgroup" "gopkg.in/yaml.v2" "k8s.io/utils/ptr" @@ -160,7 +161,7 @@ var ( ) func getMinimalMasterInventory(cidr string) *models.Inventory { - inventory := *getDefaultInventory(cidr) + inventory := *utils_test.GetDefaultInventory(cidr) inventory.CPU = &models.CPU{Count: 4} inventory.Memory = &models.Memory{PhysicalBytes: int64(16 * units.GiB), UsableBytes: int64(16 * units.GiB)} return &inventory @@ -190,7 +191,7 @@ var _ = Describe("Cluster with Platform", func() { Context("vSphere", func() { It("vSphere cluster on OCP 4.12 - Success", func() { - cluster, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String("4.12"), @@ -204,7 +205,7 @@ var _ = Describe("Cluster with Platform", func() { }) It("vSphere cluster on OCP 4.12 with dual stack - Failure", func() { - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := 
utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String("4.12"), @@ -228,7 +229,7 @@ var _ = Describe("Cluster with Platform", func() { }) It("vSphere cluster on OCP 4.13 with dual stack - Succeess", func() { - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String("4.13"), @@ -254,7 +255,7 @@ var _ = Describe("Cluster", func() { var err error BeforeEach(func() { - cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -275,20 +276,20 @@ var _ = Describe("Cluster", func() { It("register an unregistered host success", func() { infraEnvID = registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID - h := registerHost(*infraEnvID) - _, err1 := userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + h := utils_test.TestContext.RegisterHost(*infraEnvID) + _, err1 := utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: *infraEnvID, HostID: *h.ID, }) Expect(err1).ShouldNot(HaveOccurred()) - _, err2 := agentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ + _, err2 := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: h.ID, }, }) 
Expect(err2).ShouldNot(HaveOccurred()) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(len(c.Hosts)).Should(Equal(1)) Expect(c.Hosts[0].ID.String()).Should(Equal(h.ID.String())) }) @@ -299,7 +300,7 @@ var _ = Describe("Cluster", func() { c := cluster.GetPayload() hosts := registerHostsAndSetRoles(clusterID, *infraEnvID, 3, c.Name, c.BaseDNSDomain) By("deregister node and check master count validation") - _, err = userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: *infraEnvID, HostID: *hosts[0].ID, }) @@ -310,7 +311,7 @@ var _ = Describe("Cluster", func() { }) It("update cluster name exceed max length (54 characters)", func() { - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ Name: swag.String("loveisintheaireverywhereilookaroundloveisintheaireverysightandeverysound"), }, @@ -320,11 +321,11 @@ var _ = Describe("Cluster", func() { }) It("cluster name exceed max length (54 characters)", func() { - _, err1 := userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ + _, err1 := utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ ClusterID: clusterID, }) Expect(err1).ShouldNot(HaveOccurred()) - cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij"), OpenshiftVersion: swag.String(openshiftVersion), @@ 
-335,11 +336,11 @@ var _ = Describe("Cluster", func() { }) It("register an unregistered cluster success", func() { - _, err1 := userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ + _, err1 := utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ ClusterID: clusterID, }) Expect(err1).ShouldNot(HaveOccurred()) - cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -354,10 +355,10 @@ var _ = Describe("Cluster", func() { It("list clusters - get unregistered cluster", func() { infraEnvID = registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID - _ = registerHost(*infraEnvID) - _, err1 := userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _ = utils_test.TestContext.RegisterHost(*infraEnvID) + _, err1 := utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err1).ShouldNot(HaveOccurred()) - ret, err2 := readOnlyAdminUserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{GetUnregisteredClusters: swag.Bool(true)}) + ret, err2 := utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{GetUnregisteredClusters: swag.Bool(true)}) Expect(err2).ShouldNot(HaveOccurred()) clusters := ret.GetPayload() Expect(len(clusters)).ShouldNot(Equal(0)) @@ -375,10 +376,10 @@ var _ = Describe("Cluster", func() { It("list clusters - get unregistered cluster with hosts", func() { infraEnvID = registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID - _ = registerHost(*infraEnvID) - _, err1 := 
userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _ = utils_test.TestContext.RegisterHost(*infraEnvID) + _, err1 := utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err1).ShouldNot(HaveOccurred()) - ret, err2 := readOnlyAdminUserBMClient.Installer.V2ListClusters(ctx, + ret, err2 := utils_test.TestContext.ReadOnlyAdminUserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{GetUnregisteredClusters: swag.Bool(true), WithHosts: true, }) @@ -397,42 +398,42 @@ var _ = Describe("Cluster", func() { It("cluster CRUD", func() { infraEnvID = registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID - _ = registerHost(*infraEnvID) + _ = utils_test.TestContext.RegisterHost(*infraEnvID) Expect(err).NotTo(HaveOccurred()) - getReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) Expect(getReply.GetPayload().Hosts[0].ClusterID.String()).Should(Equal(clusterID.String())) - getReply, err = agentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getReply, err = utils_test.TestContext.AgentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) Expect(getReply.GetPayload().Hosts[0].ClusterID.String()).Should(Equal(clusterID.String())) - list, err := userBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(1)) - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, 
&installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - list, err = userBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) + list, err = utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(0)) - _, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).Should(HaveOccurred()) }) It("cluster update", func() { By("update cluster with valid ssh key") infraEnvID = registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID - host1 := registerHost(*infraEnvID) - host2 := registerHost(*infraEnvID) + host1 := utils_test.TestContext.RegisterHost(*infraEnvID) + host2 := utils_test.TestContext.RegisterHost(*infraEnvID) - validPublicKey := sshPublicKey + validPublicKey := utils_test.SshPublicKey //update host roles with v2 UpdateHost request - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -440,7 +441,7 @@ var _ = Describe("Cluster", func() { InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -449,7 +450,7 @@ var _ = 
Describe("Cluster", func() { }) Expect(err).NotTo(HaveOccurred()) - c, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + c, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ SSHPublicKey: &validPublicKey, }, @@ -458,16 +459,16 @@ var _ = Describe("Cluster", func() { Expect(err).NotTo(HaveOccurred()) Expect(c.GetPayload().SSHPublicKey).Should(Equal(validPublicKey)) - h := getHostV2(*infraEnvID, *host1.ID) + h := utils_test.TestContext.GetHostV2(*infraEnvID, *host1.ID) Expect(h.Role).Should(Equal(models.HostRole(models.HostRoleUpdateParamsMaster))) - h = getHostV2(*infraEnvID, *host2.ID) + h = utils_test.TestContext.GetHostV2(*infraEnvID, *host2.ID) Expect(h.Role).Should(Equal(models.HostRole(models.HostRoleUpdateParamsWorker))) By("update cluster invalid ssh key") invalidPublicKey := `ssh-rsa AAAAB3NzaC1yc2EAAAADAABgQD14Gv4V5DVvyr7O6/44laYx52VYLe8yrEA3fOieWDmojRs3scqLnfeLHJWsfYA4QMjTuraLKhT8dhETSYiSR88RMM56+isLbcLshE6GkNkz3MBZE2hcdakqMDm6vucP3dJD6snuh5Hfpq7OWDaTcC0zCAzNECJv8F7LcWVa8TLpyRgpek4U022T5otE1ZVbNFqN9OrGHgyzVQLtC4xN1yT83ezo3r+OEdlSVDRQfsq73Zg26d4dyagb6lmrryUUAAbfmn/HalJTHB73LyjilKiPvJ+x2bG7AeiqyVHwtQSpt02FCdQGptmsSqqWF/b9botOO38eUsqPNppMn7LT5wzDZdDlfwTCBWkpqijPcdo/LTD9dJlNHjwXZtHETtiid6N3ZZWpA0/VKjqUeQdSnHqLEzTidswsnOjCIoIhmJFqczeP5kOty/MWdq1II/FX/EpYCJxoSWkT/hVwD6VOamGwJbLVw9LkEb0VVWFRJB5suT/T8DtPdPl+A0qUGiN4KM= oscohen@localhost.localdomain` - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ SSHPublicKey: &invalidPublicKey, }, @@ -478,7 +479,7 @@ var _ = Describe("Cluster", func() { }) func isClusterInState(ctx context.Context, clusterID strfmt.UUID, state, stateInfo string) (bool, string, string) { - rep, err := 
userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) ExpectWithOffset(2, err).NotTo(HaveOccurred()) c := rep.GetPayload() if swag.StringValue(c.Status) == state { @@ -521,7 +522,7 @@ func waitForClusterState(ctx context.Context, clusterID strfmt.UUID, state strin } func isHostInState(ctx context.Context, infraEnvID strfmt.UUID, hostID strfmt.UUID, state string) (bool, string, string) { - rep, err := userBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ InfraEnvID: infraEnvID, HostID: hostID, }) @@ -586,7 +587,7 @@ func waitForMachineNetworkCIDR( currentMachineNetworkCIDR := "" for start, _ := time.Now(), 0; time.Since(start) < timeout; { - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() @@ -612,18 +613,18 @@ func waitForMachineNetworkCIDR( func installCluster(clusterID strfmt.UUID) *models.Cluster { ctx := context.Background() - reply, err := userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := reply.GetPayload() Expect(*c.Status).Should(Equal(models.ClusterStatusPreparingForInstallation)) - generateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) 
waitForClusterState(ctx, clusterID, models.ClusterStatusInstalling, 180*time.Second, "Installation in progress") - waitForHostState(ctx, models.HostStatusInstalling, defaultWaitForHostStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusInstalling, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() Expect(c).NotTo(BeNil()) @@ -634,16 +635,16 @@ func installCluster(clusterID strfmt.UUID) *models.Cluster { func tryInstallClusterWithDiskResponses(clusterID strfmt.UUID, successfulHosts, failedHosts []*models.Host) *models.Cluster { Expect(len(failedHosts)).To(BeNumerically(">", 0)) ctx := context.Background() - reply, err := userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := reply.GetPayload() Expect(*c.Status).Should(Equal(models.ClusterStatusPreparingForInstallation)) - generateFailedDiskSpeedResponses(ctx, sdbId, failedHosts...) - generateSuccessfulDiskSpeedResponses(ctx, sdbId, successfulHosts...) + utils_test.TestContext.GenerateFailedDiskSpeedResponses(ctx, sdbId, failedHosts...) + utils_test.TestContext.GenerateSuccessfulDiskSpeedResponses(ctx, sdbId, successfulHosts...) waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, 180*time.Second, IgnoreStateInfo) - waitForHostState(ctx, models.HostStatusInsufficient, defaultWaitForHostStateTimeout, failedHosts...) + waitForHostState(ctx, models.HostStatusInsufficient, utils_test.DefaultWaitForHostStateTimeout, failedHosts...) 
expectedKnownHosts := make([]*models.Host, 0) outer: @@ -656,9 +657,9 @@ outer: expectedKnownHosts = append(expectedKnownHosts, h) } - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, expectedKnownHosts...) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, expectedKnownHosts...) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() Expect(c).NotTo(BeNil()) @@ -674,7 +675,7 @@ func completeInstallation(client *client.AssistedInstall, clusterID strfmt.UUID) status := models.OperatorStatusAvailable Eventually(func() error { - _, err = agentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa), }) @@ -686,7 +687,7 @@ func completeInstallation(client *client.AssistedInstall, clusterID strfmt.UUID) continue } - v2ReportMonitoredOperatorStatus(ctx, client, clusterID, operator.Name, status, "") + utils_test.TestContext.V2ReportMonitoredOperatorStatus(ctx, clusterID, operator.Name, status, "") } } @@ -705,7 +706,7 @@ func failInstallation(client *client.AssistedInstall, clusterID strfmt.UUID) { func completeInstallationAndVerify(ctx context.Context, client *client.AssistedInstall, clusterID strfmt.UUID, completeSuccess bool) { if completeSuccess { completeInstallation(client, clusterID) - waitForClusterState(ctx, clusterID, models.ClusterStatusInstalled, defaultWaitForClusterStateTimeout, IgnoreStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusInstalled, utils_test.DefaultWaitForClusterStateTimeout, 
IgnoreStateInfo) } else { failInstallation(client, clusterID) } @@ -723,13 +724,13 @@ func setClusterAsInstalling(ctx context.Context, clusterID strfmt.UUID) { func setClusterAsFinalizing(ctx context.Context, clusterID strfmt.UUID) { setClusterAsInstalling(ctx, clusterID) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) for _, host := range c.Hosts { - updateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) } - waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) } var _ = Describe("V2ListClusters", func() { @@ -741,13 +742,13 @@ var _ = Describe("V2ListClusters", func() { BeforeEach(func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(true), NetworkType: swag.String(models.ClusterCreateParamsNetworkTypeOpenShiftSDN), }, @@ -766,15 +767,15 @@ var _ = Describe("V2ListClusters", func() { }) It("searching for an existing openshift cluster ID", func() { - list, err := userBMClient.Installer.V2ListClusters( + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters( ctx, - &installer.V2ListClustersParams{OpenshiftClusterID: strToUUID("41940ee8-ec99-43de-8766-174381b4921d")}) + 
&installer.V2ListClustersParams{OpenshiftClusterID: utils_test.StrToUUID("41940ee8-ec99-43de-8766-174381b4921d")}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(1)) }) It("discarding openshift cluster ID field", func() { - list, err := userBMClient.Installer.V2ListClusters( + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters( ctx, &installer.V2ListClustersParams{}) Expect(err).NotTo(HaveOccurred()) @@ -782,9 +783,9 @@ var _ = Describe("V2ListClusters", func() { }) It("searching for a non-existing openshift cluster ID", func() { - list, err := userBMClient.Installer.V2ListClusters( + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters( ctx, - &installer.V2ListClustersParams{OpenshiftClusterID: strToUUID("00000000-0000-0000-0000-000000000000")}) + &installer.V2ListClustersParams{OpenshiftClusterID: utils_test.StrToUUID("00000000-0000-0000-0000-000000000000")}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(0)) }) @@ -799,7 +800,7 @@ var _ = Describe("V2ListClusters", func() { }) It("searching for an existing AMS subscription ID", func() { - list, err := userBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{ + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{ AmsSubscriptionIds: []string{FakeSubscriptionID.String()}, }) Expect(err).NotTo(HaveOccurred()) @@ -808,13 +809,13 @@ var _ = Describe("V2ListClusters", func() { }) It("discarding AMS subscription ID field", func() { - list, err := userBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(1)) }) It("searching for a non-existing AMS Subscription ID", func() { - list, err := userBMClient.Installer.V2ListClusters(ctx, 
&installer.V2ListClustersParams{ + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{ AmsSubscriptionIds: []string{"1h89fvtqeelulpo0fl5oddngj2ao7XXX"}, }) Expect(err).NotTo(HaveOccurred()) @@ -822,7 +823,7 @@ var _ = Describe("V2ListClusters", func() { }) It("searching for both existing and non-existing AMS subscription IDs", func() { - list, err := userBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{ + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{ AmsSubscriptionIds: []string{ FakeSubscriptionID.String(), "1h89fvtqeelulpo0fl5oddngj2ao7XXX", @@ -853,7 +854,7 @@ var _ = Describe("cluster install - DHCP", func() { } b, err := json.Marshal(&r) Expect(err).ToNot(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: h.InfraEnvID, HostID: *h.ID, Reply: &models.StepReply{ @@ -872,7 +873,7 @@ var _ = Describe("cluster install - DHCP", func() { BeforeEach(func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -880,7 +881,7 @@ var _ = Describe("cluster install - DHCP", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(true), NetworkType: 
swag.String(models.ClusterCreateParamsNetworkTypeOpenShiftSDN), }, @@ -919,9 +920,9 @@ var _ = Describe("cluster install - DHCP", func() { }) AfterEach(func() { - reply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - Expect(reply.GetPayload().OpenshiftClusterID).To(Equal(*strToUUID("41940ee8-ec99-43de-8766-174381b4921d"))) + Expect(reply.GetPayload().OpenshiftClusterID).To(Equal(*utils_test.StrToUUID("41940ee8-ec99-43de-8766-174381b4921d"))) }) }) @@ -929,7 +930,7 @@ var _ = Describe("cluster install - DHCP", func() { clusterID := *cluster.ID infraEnvID = registerInfraEnvSpecificVersion(&clusterID, models.ImageTypeMinimalIso, cluster.OpenshiftVersion).ID registerHostsAndSetRolesDHCP(clusterID, *infraEnvID, 5, "test-cluster", "example.com") - reply, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), }, @@ -943,7 +944,7 @@ var _ = Describe("cluster install - DHCP", func() { } generateDhcpStepReply(reply.Payload.Hosts[0], "1.2.3.102", "1.2.3.103", true) - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: "1.2.3.100", ClusterID: clusterID}}, IngressVips: []*models.IngressVip{{IP: "1.2.3.101", ClusterID: clusterID}}, @@ -951,9 +952,9 @@ var _ = Describe("cluster install - DHCP", func() { ClusterID: clusterID, }) Expect(err).ToNot(HaveOccurred()) - waitForClusterState(ctx, clusterID, 
models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - reply, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + reply, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), }, @@ -961,7 +962,7 @@ var _ = Describe("cluster install - DHCP", func() { }) Expect(err).ToNot(HaveOccurred()) Expect(swag.StringValue(reply.Payload.Status)).To(Equal(models.ClusterStatusReady)) - reply, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + reply, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(true), MachineNetworks: common.TestIPv4Networking.MachineNetworks, @@ -969,9 +970,9 @@ var _ = Describe("cluster install - DHCP", func() { ClusterID: clusterID, }) Expect(err).ToNot(HaveOccurred()) - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: "1.2.3.100", ClusterID: clusterID}}, IngressVips: []*models.IngressVip{{IP: "1.2.3.101", ClusterID: clusterID}}, @@ -981,7 +982,7 @@ var _ = Describe("cluster install - DHCP", func() { Expect(err).To(HaveOccurred()) generateDhcpStepReply(reply.Payload.Hosts[0], "1.2.3.102", "1.2.3.103", false) 
waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) - getReply, err := userBMClient.Installer.V2GetCluster(ctx, installer.NewV2GetClusterParams().WithClusterID(clusterID)) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, installer.NewV2GetClusterParams().WithClusterID(clusterID)) Expect(err).ToNot(HaveOccurred()) c := getReply.Payload Expect(swag.StringValue(c.Status)).To(Equal(models.ClusterStatusReady)) @@ -1002,7 +1003,7 @@ var _ = Describe("Validate BaseDNSDomain when creating a cluster", func() { ShouldThrow bool } createClusterWithBaseDNS := func(baseDNS string) (*installer.V2RegisterClusterCreated, error) { - return userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + return utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: baseDNS, ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -1010,7 +1011,7 @@ var _ = Describe("Validate BaseDNSDomain when creating a cluster", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, }, }) } @@ -1091,7 +1092,7 @@ var _ = Describe("cluster update - BaseDNS", func() { BeforeEach(func() { var registerClusterReply *installer.V2RegisterClusterCreated - registerClusterReply, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -1099,7 +1100,7 @@ var _ = Describe("cluster 
update - BaseDNS", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, }, }) Expect(err).NotTo(HaveOccurred()) @@ -1109,7 +1110,7 @@ var _ = Describe("cluster update - BaseDNS", func() { }) Context("Update BaseDNS", func() { It("Should not throw an error with valid 2 part DNS", func() { - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ BaseDNSDomain: swag.String("abc.com"), }, @@ -1118,7 +1119,7 @@ var _ = Describe("cluster update - BaseDNS", func() { Expect(err).ToNot(HaveOccurred()) }) It("Should not throw an error with valid 3 part DNS", func() { - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ BaseDNSDomain: swag.String("abc.def.com"), }, @@ -1128,7 +1129,7 @@ var _ = Describe("cluster update - BaseDNS", func() { }) }) It("Should throw an error with invalid top-level domain", func() { - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ BaseDNSDomain: swag.String("abc.com.c"), }, @@ -1137,7 +1138,7 @@ var _ = Describe("cluster update - BaseDNS", func() { Expect(err).To(HaveOccurred()) }) It("Should throw an error with invalid char prefix domain", func() { - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, 
&installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ BaseDNSDomain: swag.String("-abc.com"), }, @@ -1161,7 +1162,7 @@ var _ = Describe("cluster install", func() { ) BeforeEach(func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -1169,7 +1170,7 @@ var _ = Describe("cluster install", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, }, }) Expect(err).NotTo(HaveOccurred()) @@ -1178,11 +1179,11 @@ var _ = Describe("cluster install", func() { infraEnvID = registerInfraEnv(cluster.ID, models.ImageTypeMinimalIso).ID }) AfterEach(func() { - deregisterResources() - clearDB() + utils_test.TestContext.DeregisterResources() + utils_test.TestContext.ClearDB() }) getSuggestedRole := func(id strfmt.UUID) models.HostRole { - reply, err := userBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ InfraEnvID: *infraEnvID, HostID: id, }) @@ -1192,27 +1193,27 @@ var _ = Describe("cluster install", func() { It("auto-assign", func() { By("register 3 hosts all with master hw information cluster expected to be ready") clusterID := *cluster.ID - hosts, ips := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + hosts, ips := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) h1, h2, h3 := hosts[0], hosts[1], hosts[2] - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 
defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("change first host hw info to worker and expect the cluster to become insufficient") - generateHWPostStepReply(ctx, h1, getValidWorkerHwInfoWithCIDR(ips[0]), "h1") - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, getValidWorkerHwInfoWithCIDR(ips[0]), "h1") + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("add two more hosts with minimal master inventory expect the cluster to be ready") newIPs := hostutil.GenerateIPv4Addresses(3, ips[2]) - h4 := ®isterHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, h4, "h4", getMinimalMasterInventory(newIPs[0])) + h4 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h4, "h4", getMinimalMasterInventory(newIPs[0])) - h5 := ®isterHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, h5, "h5", getMinimalMasterInventory(newIPs[1])) + h5 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h5, "h5", getMinimalMasterInventory(newIPs[1])) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4, h5) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h4, h5) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h4, h5) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("expect h4 and h5 to be auto-assigned as masters") @@ -1220,27 +1221,27 @@ var _ 
= Describe("cluster install", func() { Expect(getSuggestedRole(*h5.ID)).Should(Equal(models.HostRoleMaster)) By("add hosts with worker inventory expect the cluster to be ready") - h6 := ®isterHost(*infraEnvID).Host - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, + h6 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - generateEssentialHostStepsWithInventory(ctx, h6, "h6", getValidWorkerHwInfoWithCIDR(newIPs[2])) + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h6, "h6", getValidWorkerHwInfoWithCIDR(newIPs[2])) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4, h5, h6) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h6) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h6) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("start installation and validate roles") - _, err := userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - generateEssentialPrepareForInstallationSteps(ctx, h1, h2, h3, h4, h5, h6) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1, h2, h3, h4, h5, h6) waitForClusterState(context.Background(), clusterID, models.ClusterStatusInstalling, 3*time.Minute, IgnoreStateInfo) getHostRole := func(id strfmt.UUID) models.HostRole { var reply *installer.V2GetHostOK - reply, err = userBMClient.Installer.V2GetHost(ctx, 
&installer.V2GetHostParams{ + reply, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ InfraEnvID: *infraEnvID, HostID: id, }) @@ -1251,7 +1252,7 @@ var _ = Describe("cluster install", func() { Expect(getHostRole(*h6.ID)).Should(Equal(models.HostRoleWorker)) Expect(getHostRole(*h4.ID)).Should(Equal(models.HostRoleMaster)) Expect(getHostRole(*h5.ID)).Should(Equal(models.HostRoleMaster)) - getReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) mastersCount := 0 workersCount := 0 @@ -1267,7 +1268,7 @@ var _ = Describe("cluster install", func() { Expect(workersCount).Should(Equal(3)) By("check auto-assign usage report") - verifyUsageSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageSet(getReply.Payload.FeatureUsage, models.Usage{Name: usage.AutoAssignRoleUsage}) }) @@ -1279,16 +1280,16 @@ var _ = Describe("cluster install", func() { hosts := make([]*models.Host, 6) for i := 0; i < 3; i++ { - hwInventory := getDefaultInventory(ips[i]) + hwInventory := utils_test.GetDefaultInventory(ips[i]) hwInventory.CPU.Flags = []string{"vmx"} - hosts[i] = ®isterHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, hosts[i], fmt.Sprintf("hhh%d", i+1), hwInventory) + hosts[i] = &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, hosts[i], fmt.Sprintf("hhh%d", i+1), hwInventory) } - updateVipParams(ctx, clusterID) + utils_test.TestContext.UpdateVipParams(ctx, clusterID) generateFullMeshConnectivity(ctx, ips[0], hosts[0], hosts[1], hosts[2]) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, hosts[0], hosts[1], hosts[2]) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 
defaultWaitForClusterStateTimeout, + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, hosts[0], hosts[1], hosts[2]) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("add three more hosts with minimal master inventory expect the cluster to be ready") @@ -1296,12 +1297,12 @@ var _ = Describe("cluster install", func() { minHwInventory := getMinimalMasterInventory(ips[i]) minHwInventory.CPU.Flags = []string{"vmx"} minHwInventory.CPU.Count += 1 - hosts[i] = ®isterHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, hosts[i], fmt.Sprintf("hhh%d", i+1), minHwInventory) + hosts[i] = &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, hosts[i], fmt.Sprintf("hhh%d", i+1), minHwInventory) } generateFullMeshConnectivity(ctx, ips[0], hosts...) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, hosts...) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, hosts...) 
+ waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("expect h4, h5 and h6 to be auto-assigned as masters") @@ -1310,7 +1311,7 @@ var _ = Describe("cluster install", func() { Expect(getSuggestedRole(*hosts[5].ID)).Should(Equal(models.HostRoleMaster)) By("add cnv operators") - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ @@ -1319,8 +1320,8 @@ var _ = Describe("cluster install", func() { }, }) Expect(err).ToNot(HaveOccurred()) - waitForHostState(ctx, models.HostStatusKnown, 3*defaultWaitForHostStateTimeout, hosts...) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForHostState(ctx, models.HostStatusKnown, 3*utils_test.DefaultWaitForHostStateTimeout, hosts...) 
+ waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("expect h1, h2 and h3 to be auto-assigned as masters") @@ -1332,28 +1333,28 @@ var _ = Describe("cluster install", func() { It("Schedulable masters", func() { By("register 3 hosts all with master hw information cluster expected to be ready") clusterID := *cluster.ID - hosts, ips := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + hosts, ips := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) h1, h2, h3 := hosts[0], hosts[1], hosts[2] for _, h := range hosts { - generateDomainResolution(ctx, h, "test-cluster", "example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "example.com") } - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("add two more hosts with worker inventory expect the cluster to be ready") - h4 := ®isterHost(*infraEnvID).Host - h5 := ®isterHost(*infraEnvID).Host + h4 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + h5 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host newIPs := hostutil.GenerateIPv4Addresses(2, ips[2]) - generateEssentialHostStepsWithInventory(ctx, h4, "h4", getValidWorkerHwInfoWithCIDR(newIPs[0])) - generateEssentialHostStepsWithInventory(ctx, h5, "h5", getValidWorkerHwInfoWithCIDR(newIPs[1])) - generateDomainResolution(ctx, h4, "test-cluster", "example.com") - generateDomainResolution(ctx, h5, "test-cluster", "example.com") + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h4, "h4", getValidWorkerHwInfoWithCIDR(newIPs[0])) + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h5, "h5", getValidWorkerHwInfoWithCIDR(newIPs[1])) + utils_test.TestContext.GenerateDomainResolution(ctx, h4, 
"test-cluster", "example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h5, "test-cluster", "example.com") generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4, h5) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h4, h5) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h4, h5) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - updateClusterReply, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + updateClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ SchedulableMasters: swag.Bool(true), }, @@ -1365,20 +1366,20 @@ var _ = Describe("cluster install", func() { Expect(updateClusterReply.Payload.Hosts[i].RequestedHostname).Should(Not(BeEmpty())) } - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("start installation") - _, err = userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - generateEssentialPrepareForInstallationSteps(ctx, h1, h2, h3, h4, h5) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1, h2, h3, h4, h5) waitForClusterState(context.Background(), clusterID, models.ClusterStatusInstalling, 3*time.Minute, IgnoreStateInfo) }) Context("usage", func() { It("report usage on default features with SNO", func() { - 
registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -1386,7 +1387,7 @@ var _ = Describe("cluster install", func() { Name: swag.String("sno-cluster"), OpenshiftVersion: swag.String(snoVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(false), NetworkType: swag.String("OVNKubernetes"), HighAvailabilityMode: swag.String(models.ClusterHighAvailabilityModeNone), @@ -1394,13 +1395,13 @@ var _ = Describe("cluster install", func() { }) Expect(err).NotTo(HaveOccurred()) cluster = registerClusterReply.GetPayload() - getReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) Expect(err).NotTo(HaveOccurred()) log.Infof("usage after create: %s\n", getReply.Payload.FeatureUsage) - verifyUsageSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageSet(getReply.Payload.FeatureUsage, models.Usage{Name: usage.HighAvailabilityModeUsage}, models.Usage{Name: usage.HyperthreadingUsage, Data: map[string]interface{}{"hyperthreading_enabled": models.ClusterHyperthreadingAll}}) - verifyUsageNotSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageNotSet(getReply.Payload.FeatureUsage, strings.ToUpper("console"), usage.VipDhcpAllocationUsage, usage.CPUArchitectureARM64, @@ -1411,7 +1412,7 @@ var _ = Describe("cluster install", func() { It("report usage on update cluster", func() { clusterID := *cluster.ID - h := 
®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host inventory, err := common.UnmarshalInventory(defaultInventory()) Expect(err).ToNot(HaveOccurred()) inventory.SystemVendor.Virtual = true @@ -1424,7 +1425,7 @@ var _ = Describe("cluster install", func() { ovn := "OVNKubernetes" hostname := "h1" hyperthreading := "none" - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostName: &hostname, }, @@ -1432,7 +1433,7 @@ var _ = Describe("cluster install", func() { InfraEnvID: h.InfraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), AdditionalNtpSource: &ntpSources, @@ -1446,14 +1447,14 @@ var _ = Describe("cluster install", func() { ClusterID: clusterID, }) Expect(err).ShouldNot(HaveOccurred()) - getReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - verifyUsageNotSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageNotSet(getReply.Payload.FeatureUsage, usage.SDNNetworkTypeUsage, usage.DualStackUsage, usage.DualStackVipsUsage, usage.HyperthreadingUsage) - verifyUsageSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageSet(getReply.Payload.FeatureUsage, models.Usage{Name: usage.OVNNetworkTypeUsage}, models.Usage{ Name: usage.ClusterManagedNetworkWithVMs, @@ -1489,7 +1490,7 @@ var _ = Describe("cluster install", func() { Context("dual-stack 
usage", func() { It("report usage new dual-stack cluster", func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ APIVips: []*models.APIVip{{IP: "1.2.3.8"}}, IngressVips: []*models.IngressVip{{IP: "1.2.3.9"}}, @@ -1506,7 +1507,7 @@ var _ = Describe("cluster install", func() { Name: swag.String("sno-cluster"), OpenshiftVersion: swag.String(snoVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(false), NetworkType: swag.String("OVNKubernetes"), HighAvailabilityMode: swag.String(models.ClusterHighAvailabilityModeNone), @@ -1514,13 +1515,13 @@ var _ = Describe("cluster install", func() { }) Expect(err).NotTo(HaveOccurred()) cluster = registerClusterReply.GetPayload() - getReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) Expect(err).NotTo(HaveOccurred()) log.Infof("usage after create: %s\n", getReply.Payload.FeatureUsage) - verifyUsageSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageSet(getReply.Payload.FeatureUsage, models.Usage{Name: usage.HighAvailabilityModeUsage}, models.Usage{Name: usage.DualStackUsage}) - verifyUsageNotSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageNotSet(getReply.Payload.FeatureUsage, strings.ToUpper("console"), usage.VipDhcpAllocationUsage, usage.CPUArchitectureARM64, @@ -1529,7 +1530,7 @@ var _ = Describe("cluster install", func() { }) It("report usage new dual-stack cluster with dual-stack VIPs", func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, 
&installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ APIVips: []*models.APIVip{{IP: "1.2.3.8"}, {IP: "1001:db8::8"}}, IngressVips: []*models.IngressVip{{IP: "1.2.3.9"}, {IP: "1001:db8::9"}}, @@ -1546,7 +1547,7 @@ var _ = Describe("cluster install", func() { Name: swag.String("sno-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(false), NetworkType: swag.String("OVNKubernetes"), HighAvailabilityMode: swag.String(models.ClusterHighAvailabilityModeNone), @@ -1554,14 +1555,14 @@ var _ = Describe("cluster install", func() { }) Expect(err).NotTo(HaveOccurred()) cluster = registerClusterReply.GetPayload() - getReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) Expect(err).NotTo(HaveOccurred()) log.Infof("usage after create: %s\n", getReply.Payload.FeatureUsage) - verifyUsageSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageSet(getReply.Payload.FeatureUsage, models.Usage{Name: usage.HighAvailabilityModeUsage}, models.Usage{Name: usage.DualStackUsage}, models.Usage{Name: usage.DualStackVipsUsage}) - verifyUsageNotSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageNotSet(getReply.Payload.FeatureUsage, strings.ToUpper("console"), usage.VipDhcpAllocationUsage, usage.CPUArchitectureARM64, @@ -1570,7 +1571,7 @@ var _ = Describe("cluster install", func() { It("unset dual-stack usage on update cluster", func() { clusterID := *cluster.ID - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := 
utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), NetworkType: swag.String("OVNKubernetes"), @@ -1582,9 +1583,9 @@ var _ = Describe("cluster install", func() { ClusterID: clusterID, }) Expect(err).ShouldNot(HaveOccurred()) - getReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - verifyUsageNotSet(getReply.Payload.FeatureUsage, + utils_test.VerifyUsageNotSet(getReply.Payload.FeatureUsage, usage.VipDhcpAllocationUsage, usage.SDNNetworkTypeUsage, usage.DualStackUsage, @@ -1598,8 +1599,8 @@ var _ = Describe("cluster install", func() { clusterID := *cluster.ID apiVip := "1.2.3.8" ingressVip := "1.2.3.100" - _ = registerNode(ctx, *infraEnvID, "test-host", defaultCIDRv4) - c, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _ = utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "test-host", defaultCIDRv4) + c, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, @@ -1618,8 +1619,8 @@ var _ = Describe("cluster install", func() { clusterID := *cluster.ID apiVip := "1.2.3.8" ingressVip := "1.2.3.100" - host := registerNode(ctx, *infraEnvID, "test-host", defaultCIDRv4) - c, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + host := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "test-host", defaultCIDRv4) + c, err := 
utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, @@ -1631,19 +1632,19 @@ var _ = Describe("cluster install", func() { Expect(string(c.Payload.APIVips[0].IP)).Should(Equal(apiVip)) Expect(string(c.Payload.IngressVips[0].IP)).To(Equal(ingressVip)) Expect(waitForMachineNetworkCIDR( - ctx, clusterID, "1.2.3.0/24", defaultWaitForMachineNetworkCIDRTimeout)).ShouldNot(HaveOccurred()) - _, err1 := userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + ctx, clusterID, "1.2.3.0/24", utils_test.DefaultWaitForMachineNetworkCIDRTimeout)).ShouldNot(HaveOccurred()) + _, err1 := utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: *infraEnvID, HostID: *host.ID, }) Expect(err1).ShouldNot(HaveOccurred()) Expect(waitForMachineNetworkCIDR( - ctx, clusterID, "", defaultWaitForMachineNetworkCIDRTimeout)).ShouldNot(HaveOccurred()) + ctx, clusterID, "", utils_test.DefaultWaitForMachineNetworkCIDRTimeout)).ShouldNot(HaveOccurred()) }) It("MachineNetworkCIDR no vips - no allocation", func() { clusterID := *cluster.ID - c, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + c, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), }, @@ -1654,10 +1655,10 @@ var _ = Describe("cluster install", func() { Expect(len(c.Payload.APIVips)).Should(Equal(0)) Expect(len(c.Payload.IngressVips)).Should(Equal(0)) Expect(c.Payload.MachineNetworks).Should(BeEmpty()) - _ = registerNode(ctx, *infraEnvID, "test-host", defaultCIDRv4) + _ = utils_test.TestContext.RegisterNode(ctx, 
*infraEnvID, "test-host", defaultCIDRv4) Expect(waitForMachineNetworkCIDR( - ctx, clusterID, "1.2.3.0/24", defaultWaitForMachineNetworkCIDRTimeout)).Should(HaveOccurred()) - c1, err1 := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + ctx, clusterID, "1.2.3.0/24", utils_test.DefaultWaitForMachineNetworkCIDRTimeout)).Should(HaveOccurred()) + c1, err1 := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err1).NotTo(HaveOccurred()) Expect(c1.Payload.MachineNetworks).Should(BeEmpty()) }) @@ -1665,7 +1666,7 @@ var _ = Describe("cluster install", func() { It("MachineNetworkCIDR no hosts - no allocation", func() { clusterID := *cluster.ID apiVip := "1.2.3.8" - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, VipDhcpAllocation: swag.Bool(false), @@ -1674,8 +1675,8 @@ var _ = Describe("cluster install", func() { }) Expect(err).To(HaveOccurred()) Expect(waitForMachineNetworkCIDR( - ctx, clusterID, "1.2.3.0/24", defaultWaitForMachineNetworkCIDRTimeout)).Should(HaveOccurred()) - c1, err1 := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + ctx, clusterID, "1.2.3.0/24", utils_test.DefaultWaitForMachineNetworkCIDRTimeout)).Should(HaveOccurred()) + c1, err1 := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err1).NotTo(HaveOccurred()) Expect(c1.Payload.MachineNetworks).Should(BeEmpty()) }) @@ -1689,7 +1690,7 @@ var _ = Describe("cluster install", func() { BeforeEach(func() { clusterID = *cluster.ID registerHostsAndSetRoles(clusterID, *infraEnvID, 6, cluster.Name, 
cluster.BaseDNSDomain) - cluster = getCluster(clusterID) + cluster = utils_test.TestContext.GetCluster(clusterID) for _, h := range cluster.Hosts { if h.Role == models.HostRoleWorker { misbehavingHostID = *h.ID @@ -1703,45 +1704,45 @@ var _ = Describe("cluster install", func() { It(fmt.Sprintf("full flow with single host in stage %s", string(stage)), func() { By("installing cluster", func() { installCluster(clusterID) - cluster = getCluster(clusterID) + cluster = utils_test.TestContext.GetCluster(clusterID) }) By("move cluster to finalizing", func() { for _, h := range cluster.Hosts { if *h.ID != misbehavingHostID { - updateProgress(*h.ID, h.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*h.ID, h.InfraEnvID, models.HostStageDone) } else { - updateProgress(*h.ID, h.InfraEnvID, models.HostStageRebooting) + utils_test.TestContext.UpdateProgress(*h.ID, h.InfraEnvID, models.HostStageRebooting) } } - waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) By("complete installation. state should be still finalizing", func() { - completeInstallation(agentBMClient, clusterID) - waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, + completeInstallation(utils_test.TestContext.AgentBMClient, clusterID) + waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) By("register host. 
move to pending-user-action", func() { - c1 := getCluster(clusterID) - _ = registerHostByUUID(*infraEnvID, misbehavingHostID) - waitForClusterState(ctx, clusterID, models.ClusterStatusInstallingPendingUserAction, defaultWaitForClusterStateTimeout, + c1 := utils_test.TestContext.GetCluster(clusterID) + _ = utils_test.TestContext.RegisterHostByUUID(*infraEnvID, misbehavingHostID) + waitForClusterState(ctx, clusterID, models.ClusterStatusInstallingPendingUserAction, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - host := getHostV2(*infraEnvID, misbehavingHostID) + host := utils_test.TestContext.GetHostV2(*infraEnvID, misbehavingHostID) Expect(swag.StringValue(host.Status)).To(Equal(models.HostStatusInstallingPendingUserAction)) - c2 := getCluster(clusterID) + c2 := utils_test.TestContext.GetCluster(clusterID) Expect(c1.Progress.TotalPercentage).To(BeNumerically("<=", c2.Progress.TotalPercentage)) }) By("move to configuring. cluster should be back in finalizing", func() { - updateProgress(misbehavingHostID, *infraEnvID, models.HostStageConfiguring) - waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, + utils_test.TestContext.UpdateProgress(misbehavingHostID, *infraEnvID, models.HostStageConfiguring) + waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) By(fmt.Sprintf("update host to to stage %s. 
Cluster should be installed", string(stage)), func() { - updateProgress(misbehavingHostID, *infraEnvID, stage) - waitForClusterState(ctx, clusterID, models.ClusterStatusInstalled, defaultWaitForClusterStateTimeout, + utils_test.TestContext.UpdateProgress(misbehavingHostID, *infraEnvID, stage) + waitForClusterState(ctx, clusterID, models.ClusterStatusInstalled, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) }) @@ -1757,12 +1758,12 @@ var _ = Describe("cluster install", func() { Context("NTP cases", func() { It("Update NTP source", func() { - c, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + c, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) hosts := c.GetPayload().Hosts By("Verify NTP step", func() { - step := getStepFromListByStepType(getNextSteps(*infraEnvID, *hosts[0].ID), models.StepTypeNtpSynchronizer) + step := utils_test.GetStepFromListByStepType(utils_test.TestContext.GetNextSteps(*infraEnvID, *hosts[0].ID), models.StepTypeNtpSynchronizer) Expect(step).ShouldNot(BeNil()) requestStr := step.Args[len(step.Args)-1] @@ -1775,7 +1776,7 @@ var _ = Describe("cluster install", func() { By("Update NTP source", func() { newSource := "5.5.5.5" - reply, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ AdditionalNtpSource: &newSource, }, @@ -1784,13 +1785,13 @@ var _ = Describe("cluster install", func() { Expect(err).ShouldNot(HaveOccurred()) Expect(reply.Payload.AdditionalNtpSource).Should(Equal(newSource)) - step := getStepFromListByStepType(getNextSteps(*infraEnvID, *hosts[0].ID), models.StepTypeNtpSynchronizer) + step := 
utils_test.GetStepFromListByStepType(utils_test.TestContext.GetNextSteps(*infraEnvID, *hosts[0].ID), models.StepTypeNtpSynchronizer) Expect(step).ShouldNot(BeNil()) requestStr := step.Args[len(step.Args)-1] var ntpRequest models.NtpSynchronizationRequest - generateDomainNameResolutionReply(ctx, hosts[0], *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, hosts[0], *common.TestDomainNameResolutionsSuccess) Expect(json.Unmarshal([]byte(requestStr), &ntpRequest)).ShouldNot(HaveOccurred()) Expect(*ntpRequest.NtpSource).Should(Equal(newSource)) @@ -1800,22 +1801,22 @@ var _ = Describe("cluster install", func() { It("Unsynced host", func() { Skip("IsNTPSynced isn't mandatory validation for host isSufficientForInstall") - c, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + c, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) hosts := c.GetPayload().Hosts By("unsync", func() { - generateNTPPostStepReply(ctx, hosts[0], []*models.NtpSource{ + utils_test.TestContext.GenerateNTPPostStepReply(ctx, hosts[0], []*models.NtpSource{ {SourceName: common.TestNTPSourceSynced.SourceName, SourceState: models.SourceStateUnreachable}, }) - generateDomainNameResolutionReply(ctx, hosts[0], *common.TestDomainNameResolutionsSuccess) - waitForHostState(ctx, models.HostStatusInsufficient, defaultWaitForHostStateTimeout, hosts[0]) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, hosts[0], *common.TestDomainNameResolutionsSuccess) + waitForHostState(ctx, models.HostStatusInsufficient, utils_test.DefaultWaitForHostStateTimeout, hosts[0]) }) By("Set new NTP source", func() { newSource := "5.5.5.5" - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, 
&installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ AdditionalNtpSource: &newSource, }, @@ -1823,25 +1824,25 @@ var _ = Describe("cluster install", func() { }) Expect(err).ShouldNot(HaveOccurred()) - generateNTPPostStepReply(ctx, hosts[0], []*models.NtpSource{ + utils_test.TestContext.GenerateNTPPostStepReply(ctx, hosts[0], []*models.NtpSource{ {SourceName: common.TestNTPSourceSynced.SourceName, SourceState: models.SourceStateUnreachable}, {SourceName: newSource, SourceState: models.SourceStateSynced}, }) }) - generateDomainNameResolutionReply(ctx, hosts[0], *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, hosts[0], *common.TestDomainNameResolutionsSuccess) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, hosts[0]) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, hosts[0]) }) }) It("register host while installing", func() { installCluster(clusterID) - waitForClusterState(ctx, clusterID, models.ClusterStatusInstalling, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusInstalling, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - _, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ - HostID: strToUUID(uuid.New().String()), + HostID: utils_test.StrToUUID(uuid.New().String()), }, }) Expect(err).To(BeAssignableToTypeOf(installer.NewV2RegisterHostConflict())) @@ -1850,31 +1851,31 @@ var _ = Describe("cluster install", func() { It("register host while cluster in error state", func() { FailCluster(ctx, clusterID, *infraEnvID, masterFailure) //Wait for cluster to get to error state - waitForClusterState(ctx, 
clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusError, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - _, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ - HostID: strToUUID(uuid.New().String()), + HostID: utils_test.StrToUUID(uuid.New().String()), }, }) Expect(err).To(BeAssignableToTypeOf(installer.NewV2RegisterHostConflict())) }) It("triggering cluster install if not in appropriate state should leave last preparation status intact", func() { - clusterInstallationReply, err := userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + clusterInstallationReply, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := clusterInstallationReply.GetPayload() Expect(*c.Status).Should(Equal(models.ClusterStatusPreparingForInstallation)) - generateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) - waitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusSuccess) - waitForClusterState(ctx, clusterID, models.ClusterStatusInstalling, defaultWaitForClusterStateTimeout, IgnoreStateInfo) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) 
+ utils_test.TestContext.WaitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusSuccess) + waitForClusterState(ctx, clusterID, models.ClusterStatusInstalling, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - _, err = userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).To(HaveOccurred()) // MGMT-19217: The LastInstallationPreparation fields should not have been changed by handling of the additional (rejected) install request. - getClusterReply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = getClusterReply.GetPayload() Expect(c.LastInstallationPreparation.Status).To(Equal(models.LastInstallationPreparationStatusSuccess)) @@ -1884,21 +1885,21 @@ var _ = Describe("cluster install", func() { It("fail installation if there is only a single worker that manages to install", func() { FailCluster(ctx, clusterID, *infraEnvID, workerFailure) //Wait for cluster to get to error state - waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusError, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) It("register existing host while cluster in installing state", func() { c := installCluster(clusterID) hostID := c.Hosts[0].ID - _, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, 
NewHostParams: &models.HostCreateParams{ HostID: hostID, }, }) Expect(err).To(BeNil()) - host := getHostV2(*infraEnvID, *hostID) + host := utils_test.TestContext.GetHostV2(*infraEnvID, *hostID) Expect(*host.Status).To(Equal("error")) }) @@ -1906,13 +1907,13 @@ var _ = Describe("cluster install", func() { c := installCluster(clusterID) hostID := c.Hosts[0].ID - Expect(isStepTypeInList(getNextSteps(*infraEnvID, *hostID), models.StepTypeInstall)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(utils_test.TestContext.GetNextSteps(*infraEnvID, *hostID), models.StepTypeInstall)).Should(BeTrue()) installProgress := models.HostStageRebooting - updateProgress(*hostID, *infraEnvID, installProgress) + utils_test.TestContext.UpdateProgress(*hostID, *infraEnvID, installProgress) By("Verify the db has been updated", func() { - hostInDb := getHostV2(*infraEnvID, *hostID) + hostInDb := utils_test.TestContext.GetHostV2(*infraEnvID, *hostID) Expect(*hostInDb.Status).Should(Equal(models.HostStatusInstallingInProgress)) Expect(*hostInDb.StatusInfo).Should(Equal(string(installProgress))) Expect(hostInDb.InstallationDiskID).ShouldNot(BeEmpty()) @@ -1921,38 +1922,38 @@ var _ = Describe("cluster install", func() { }) By("Try to register", func() { - _, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: hostID, }, }) Expect(err).To(BeNil()) - hostInDb := getHostV2(*infraEnvID, *hostID) + hostInDb := utils_test.TestContext.GetHostV2(*infraEnvID, *hostID) Expect(*hostInDb.Status).Should(Equal(models.HostStatusInstallingPendingUserAction)) waitForClusterState( ctx, clusterID, models.ClusterStatusInstallingPendingUserAction, - defaultWaitForClusterStateTimeout, + utils_test.DefaultWaitForClusterStateTimeout, 
clusterInstallingPendingUserActionStateInfo) }) By("Updating progress after fixing boot order", func() { installProgress = models.HostStageConfiguring - updateProgress(*hostID, *infraEnvID, installProgress) + utils_test.TestContext.UpdateProgress(*hostID, *infraEnvID, installProgress) }) By("Verify the db has been updated", func() { - hostInDb := getHostV2(*infraEnvID, *hostID) + hostInDb := utils_test.TestContext.GetHostV2(*infraEnvID, *hostID) Expect(*hostInDb.Status).Should(Equal(models.HostStatusInstallingInProgress)) Expect(*hostInDb.StatusInfo).Should(Equal(string(installProgress))) waitForClusterState( ctx, clusterID, models.ClusterStatusInstalling, - defaultWaitForClusterStateTimeout, + utils_test.DefaultWaitForClusterStateTimeout, clusterInstallingStateInfo) }) }) @@ -1961,7 +1962,7 @@ var _ = Describe("cluster install", func() { By("Installing cluster till finalize") setClusterAsFinalizing(ctx, clusterID) By("Completing installation installation") - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) }) It("install_cluster install command failed", func() { @@ -1979,7 +1980,7 @@ var _ = Describe("cluster install", func() { } // post failure to execute the install command - _, err := agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: *infraEnvID, HostID: masterID, Reply: &models.StepReply{ @@ -1993,7 +1994,7 @@ var _ = Describe("cluster install", func() { Expect(err).NotTo(HaveOccurred()) By("Verifying installation failed") - waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, clusterErrorInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusError, utils_test.DefaultWaitForClusterStateTimeout, clusterErrorInfo) }) It("install_cluster 
assisted-installer already running", func() { @@ -2007,7 +2008,7 @@ var _ = Describe("cluster install", func() { } // post failure to execute the install command due to a running assisted-installer - _, err := agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: *infraEnvID, HostID: *c.Hosts[0].ID, Reply: &models.StepReply{ @@ -2036,7 +2037,7 @@ var _ = Describe("cluster install", func() { } It("reset log fields before installation", func() { By("set log fields to a non-zero value") - cluster = getCluster(clusterID) + cluster = utils_test.TestContext.GetCluster(clusterID) db.Model(cluster).Updates(map[string]interface{}{ "logs_info": "requested", "controller_logs_started_at": strfmt.DateTime(time.Now()), @@ -2074,27 +2075,27 @@ var _ = Describe("cluster install", func() { requested := models.LogsStateRequested completed := models.LogsStateCompleted for _, host := range c.Hosts { - updateHostLogProgress(host.InfraEnvID, *host.ID, requested) + utils_test.TestContext.UpdateHostLogProgress(host.InfraEnvID, *host.ID, requested) } - updateClusterLogProgress(clusterID, requested) + utils_test.TestContext.UpdateClusterLogProgress(clusterID, requested) - c = getCluster(clusterID) + c = utils_test.TestContext.GetCluster(clusterID) verifyLogProgress(c, requested, requested) By("report log progress by cluster during finalizing") for _, host := range c.Hosts { - updateHostLogProgress(host.InfraEnvID, *host.ID, completed) - updateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateHostLogProgress(host.InfraEnvID, *host.ID, completed) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) } - waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) - updateClusterLogProgress(clusterID, 
requested) - c = getCluster(clusterID) + waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) + utils_test.TestContext.UpdateClusterLogProgress(clusterID, requested) + c = utils_test.TestContext.GetCluster(clusterID) verifyLogProgress(c, completed, requested) By("report log progress by cluster after installation") - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) - updateClusterLogProgress(clusterID, completed) - c = getCluster(clusterID) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) + utils_test.TestContext.UpdateClusterLogProgress(clusterID, completed) + c = utils_test.TestContext.GetCluster(clusterID) verifyLogProgress(c, completed, completed) }) }) @@ -2109,7 +2110,7 @@ var _ = Describe("cluster install", func() { CurrentStage: step, } - _, err := agentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ InfraEnvID: hosts[0].InfraEnvID, HostProgress: installProgress, HostID: *hosts[0].ID, @@ -2123,8 +2124,8 @@ var _ = Describe("cluster install", func() { By("progress_to_other_host", func() { installProgress := models.HostStageWritingImageToDisk installInfo := "68%" - updateHostProgressWithInfo(*hosts[0].ID, *infraEnvID, installProgress, installInfo) - hostFromDB := getHostV2(*infraEnvID, *hosts[0].ID) + utils_test.TestContext.UpdateHostProgressWithInfo(*hosts[0].ID, *infraEnvID, installProgress, installInfo) + hostFromDB := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[0].ID) Expect(*hostFromDB.Status).Should(Equal(models.HostStatusInstallingInProgress)) Expect(*hostFromDB.StatusInfo).Should(Equal(string(installProgress))) @@ -2134,8 +2135,8 @@ var _ = Describe("cluster install", func() { By("report_done", func() { 
installProgress := models.HostStageDone - updateProgress(*hosts[0].ID, *infraEnvID, installProgress) - hostFromDB := getHostV2(*infraEnvID, *hosts[0].ID) + utils_test.TestContext.UpdateProgress(*hosts[0].ID, *infraEnvID, installProgress) + hostFromDB := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[0].ID) Expect(*hostFromDB.Status).Should(Equal(models.HostStatusInstalled)) Expect(*hostFromDB.StatusInfo).Should(Equal(string(installProgress))) @@ -2148,7 +2149,7 @@ var _ = Describe("cluster install", func() { CurrentStage: models.HostStageFailed, } - _, err := agentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ InfraEnvID: hosts[0].InfraEnvID, HostProgress: installProgress, HostID: *hosts[0].ID, @@ -2161,8 +2162,8 @@ var _ = Describe("cluster install", func() { By("progress_to_some_host", func() { installProgress := models.HostStageWritingImageToDisk - updateProgress(*hosts[1].ID, hosts[1].InfraEnvID, installProgress) - hostFromDB := getHostV2(*infraEnvID, *hosts[1].ID) + utils_test.TestContext.UpdateProgress(*hosts[1].ID, hosts[1].InfraEnvID, installProgress) + hostFromDB := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[1].ID) Expect(*hostFromDB.Status).Should(Equal(models.HostStatusInstallingInProgress)) Expect(*hostFromDB.StatusInfo).Should(Equal(string(installProgress))) @@ -2175,7 +2176,7 @@ var _ = Describe("cluster install", func() { CurrentStage: models.HostStageInstalling, } - _, err := agentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ InfraEnvID: hosts[1].InfraEnvID, HostProgress: installProgress, HostID: *hosts[1].ID, @@ -2187,8 +2188,8 @@ var _ = Describe("cluster 
install", func() { By("report_failed_on_same_host", func() { installProgress := models.HostStageFailed installInfo := "because some error" - updateHostProgressWithInfo(*hosts[1].ID, *infraEnvID, installProgress, installInfo) - hostFromDB := getHostV2(*infraEnvID, *hosts[1].ID) + utils_test.TestContext.UpdateHostProgressWithInfo(*hosts[1].ID, *infraEnvID, installProgress, installInfo) + hostFromDB := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[1].ID) Expect(*hostFromDB.Status).Should(Equal(models.HostStatusError)) Expect(*hostFromDB.StatusInfo).Should(Equal(fmt.Sprintf("%s - %s", installProgress, installInfo))) @@ -2201,7 +2202,7 @@ var _ = Describe("cluster install", func() { CurrentStage: models.HostStageDone, } - _, err := agentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ InfraEnvID: hosts[1].InfraEnvID, HostProgress: installProgress, HostID: *hosts[1].ID, @@ -2211,12 +2212,12 @@ var _ = Describe("cluster install", func() { }) By("verify_everything_changed_error", func() { - waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusError, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() - waitForHostState(ctx, models.HostStatusError, defaultWaitForHostStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusError, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) 
}) }) @@ -2227,19 +2228,19 @@ var _ = Describe("cluster install", func() { Expect(err).NotTo(HaveOccurred()) defer os.Remove(file.Name()) - _, err = agentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "bootstrap.ign"}, file) + _, err = utils_test.TestContext.AgentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "bootstrap.ign"}, file) Expect(err).To(BeAssignableToTypeOf(installer.NewV2DownloadClusterFilesConflict())) installCluster(clusterID) missingClusterId := strfmt.UUID(uuid.New().String()) - _, err = agentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: missingClusterId, FileName: "bootstrap.ign"}, file) + _, err = utils_test.TestContext.AgentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: missingClusterId, FileName: "bootstrap.ign"}, file) Expect(err).To(BeAssignableToTypeOf(installer.NewV2DownloadClusterFilesNotFound())) - _, err = agentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "not_real_file"}, file) + _, err = utils_test.TestContext.AgentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "not_real_file"}, file) Expect(err).Should(HaveOccurred()) - _, err = agentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "bootstrap.ign"}, file) + _, err = utils_test.TestContext.AgentBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "bootstrap.ign"}, file) Expect(err).NotTo(HaveOccurred()) s, err := file.Stat() Expect(err).NotTo(HaveOccurred()) @@ -2253,17 +2254,17 @@ var _ = Describe("cluster install", func() { FailCluster(ctx, clusterID, *infraEnvID, 
masterFailure) //Wait for cluster to get to error state - waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusError, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) - _, err = userBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "bootstrap.ign"}, file) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "bootstrap.ign"}, file) Expect(err).NotTo(HaveOccurred()) s, err := file.Stat() Expect(err).NotTo(HaveOccurred()) Expect(s.Size()).ShouldNot(Equal(0)) By("Download install-config.yaml") - _, err = userBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "install-config.yaml"}, file) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ClusterID: clusterID, FileName: "install-config.yaml"}, file) Expect(err).NotTo(HaveOccurred()) s, err = file.Stat() Expect(err).NotTo(HaveOccurred()) @@ -2274,19 +2275,19 @@ var _ = Describe("cluster install", func() { By("Test getting credentials for not found cluster") { missingClusterId := strfmt.UUID(uuid.New().String()) - _, err := userBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: missingClusterId}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: missingClusterId}) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetCredentialsNotFound())) } By("Test getting credentials before console operator is available") { - _, err := userBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) + _, err := 
utils_test.TestContext.UserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetCredentialsConflict())) } By("Test happy flow") { setClusterAsFinalizing(ctx, clusterID) - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) - creds, err := userBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) + creds, err := utils_test.TestContext.UserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) Expect(creds.GetPayload().Username).To(Equal(bminventory.DefaultUser)) Expect(creds.GetPayload().ConsoleURL).To(Equal(common.GetConsoleUrl(cluster.Name, cluster.BaseDNSDomain))) @@ -2296,8 +2297,8 @@ var _ = Describe("cluster install", func() { It("Transform installed cluster to day2", func() { setClusterAsFinalizing(ctx, clusterID) - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) - clusterDay2, err := userBMClient.Installer.TransformClusterToDay2(ctx, &installer.TransformClusterToDay2Params{ClusterID: clusterID}) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) + clusterDay2, err := utils_test.TestContext.UserBMClient.Installer.TransformClusterToDay2(ctx, &installer.TransformClusterToDay2Params{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) Expect(swag.StringValue(clusterDay2.GetPayload().Status)).Should(Equal(models.ClusterStatusAddingHosts)) Expect(swag.StringValue(clusterDay2.GetPayload().Kind)).Should(Equal(models.ClusterKindAddHostsCluster)) @@ -2306,10 +2307,10 @@ var _ = Describe("cluster install", func() { It("Upload and Download logs", func() { By("Download before upload") { - nodes, _ := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + nodes, _ := 
utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, HostID: nodes[1].ID}, file) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, HostID: nodes[1].ID}, file) Expect(err).NotTo(HaveOccurred()) } @@ -2318,13 +2319,13 @@ var _ = Describe("cluster install", func() { { kubeconfigFile, err := os.Open("test_kubeconfig") Expect(err).NotTo(HaveOccurred()) - _, _ = register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, LogsType: string(models.LogsTypeController), Upfile: kubeconfigFile}) + _, _ = utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, LogsType: string(models.LogsTypeController), Upfile: kubeconfigFile}) Expect(err).NotTo(HaveOccurred()) logsType := string(models.LogsTypeController) file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, LogsType: &logsType}, file) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, LogsType: &logsType}, file) Expect(err).NotTo(HaveOccurred()) s, err := file.Stat() Expect(err).NotTo(HaveOccurred()) @@ -2336,8 +2337,8 @@ var _ = Describe("cluster install", func() { kubeconfigFile, err := os.Open("test_kubeconfig") Expect(err).NotTo(HaveOccurred()) logsType := string(models.LogsTypeHost) - hosts, _ := register3nodes(ctx, 
clusterID, *infraEnvID, defaultCIDRv4) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ + hosts, _ := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ ClusterID: clusterID, HostID: hosts[0].ID, InfraEnvID: infraEnvID, @@ -2347,7 +2348,7 @@ var _ = Describe("cluster install", func() { file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ ClusterID: clusterID, HostID: hosts[0].ID, LogsType: &logsType, @@ -2368,19 +2369,19 @@ var _ = Describe("cluster install", func() { cmd := exec.Command("head", "-c", "200MB", "/dev/urandom") err = cmd.Run() Expect(err).NotTo(HaveOccurred()) - nodes, _ := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + nodes, _ := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) // test hosts logs kubeconfigFile, err := os.Open(filePath) Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, HostID: nodes[1].ID, + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, HostID: nodes[1].ID, InfraEnvID: &nodes[1].InfraEnvID, Upfile: kubeconfigFile, LogsType: string(models.LogsTypeHost)}) Expect(err).NotTo(HaveOccurred()) - h := getHostV2(*infraEnvID, *nodes[1].ID) + h := utils_test.TestContext.GetHostV2(*infraEnvID, *nodes[1].ID) Expect(h.LogsCollectedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) logsType := string(models.LogsTypeHost) file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = 
userBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, HostID: nodes[1].ID, LogsType: &logsType}, file) Expect(err).NotTo(HaveOccurred()) s, err := file.Stat() @@ -2389,15 +2390,15 @@ var _ = Describe("cluster install", func() { // test controller logs kubeconfigFile, err = os.Open(filePath) Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, Upfile: kubeconfigFile, LogsType: string(models.LogsTypeController)}) Expect(err).NotTo(HaveOccurred()) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(c.ControllerLogsCollectedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) logsType = string(models.LogsTypeController) file, err = os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, LogsType: &logsType}, file) Expect(err).NotTo(HaveOccurred()) s, err = file.Stat() @@ -2408,7 +2409,7 @@ var _ = Describe("cluster install", func() { uploadManifest := func(content string, folder string, filename string) { base64Content := base64.StdEncoding.EncodeToString([]byte(content)) - response, err := userBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ ClusterID: clusterID, CreateManifestParams: 
&models.CreateManifestParams{ Content: &base64Content, @@ -2432,18 +2433,18 @@ name: exampleNamespace2` It("Download cluster logs", func() { // Add some manifest files and then verify that these are added to the log... - nodes, _ := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + nodes, _ := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) for _, host := range nodes { kubeconfigFile, err := os.Open("test_kubeconfig") Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, InfraEnvID: &host.InfraEnvID, HostID: host.ID, LogsType: string(models.LogsTypeHost), Upfile: kubeconfigFile}) Expect(err).NotTo(HaveOccurred()) kubeconfigFile.Close() } kubeconfigFile, err := os.Open("test_kubeconfig") Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: clusterID, LogsType: string(models.LogsTypeController), Upfile: kubeconfigFile}) Expect(err).NotTo(HaveOccurred()) kubeconfigFile.Close() @@ -2456,7 +2457,7 @@ name: exampleNamespace2` Expect(err).NotTo(HaveOccurred()) defer file.Close() logsType := string(models.LogsTypeAll) - _, err = userBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, LogsType: &logsType}, file) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterLogs(ctx, &installer.V2DownloadClusterLogsParams{ClusterID: clusterID, LogsType: &logsType}, file) Expect(err).NotTo(HaveOccurred()) s, err := file.Stat() Expect(err).NotTo(HaveOccurred()) @@ -2484,12 +2485,12 @@ name: exampleNamespace2` By("Upload ingress ca for not existent clusterid") 
{ missingClusterId := strfmt.UUID(uuid.New().String()) - _, err := agentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: missingClusterId, IngressCertParams: "dummy"}) + _, err := utils_test.TestContext.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: missingClusterId, IngressCertParams: "dummy"}) Expect(err).To(BeAssignableToTypeOf(installer.NewV2UploadClusterIngressCertNotFound())) } By("Test getting upload ingress ca in wrong state") { - _, err := agentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: "dummy"}) + _, err := utils_test.TestContext.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: "dummy"}) Expect(err).To(BeAssignableToTypeOf(installer.NewV2UploadClusterIngressCertBadRequest())) } By("Test happy flow") @@ -2498,7 +2499,7 @@ name: exampleNamespace2` // Download kubeconfig before uploading kubeconfigNoIngress, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadClusterCredentials(ctx, &installer.V2DownloadClusterCredentialsParams{ClusterID: clusterID, FileName: "kubeconfig-noingress"}, kubeconfigNoIngress) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterCredentials(ctx, &installer.V2DownloadClusterCredentialsParams{ClusterID: clusterID, FileName: "kubeconfig-noingress"}, kubeconfigNoIngress) Expect(err).ToNot(HaveOccurred()) sni, err := kubeconfigNoIngress.Stat() Expect(err).NotTo(HaveOccurred()) @@ -2506,26 +2507,26 @@ name: exampleNamespace2` By("Trying to download kubeconfig file before it exists") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) + _, err = 
utils_test.TestContext.UserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).Should(HaveOccurred()) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetCredentialsConflict())) By("Upload ingress ca") - res, err := agentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) + res, err := utils_test.TestContext.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeAssignableToTypeOf(installer.NewV2UploadClusterIngressCertCreated())) // Download kubeconfig after uploading - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetCredentials(ctx, &installer.V2GetCredentialsParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred()) } By("Try to upload ingress ca second time, do nothing and return ok") { // Try to upload ingress ca second time - res, err := agentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) + res, err := utils_test.TestContext.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, &installer.V2UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeAssignableToTypeOf(installer.NewV2UploadClusterIngressCertCreated())) } @@ -2533,21 
+2534,21 @@ name: exampleNamespace2` It("on cluster error - verify all hosts are aborted", func() { FailCluster(ctx, clusterID, *infraEnvID, masterFailure) - waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, clusterErrorInfo) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + waitForClusterState(ctx, clusterID, models.ClusterStatusError, utils_test.DefaultWaitForClusterStateTimeout, clusterErrorInfo) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() - waitForHostState(ctx, models.HostStatusError, defaultWaitForHostStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusError, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) }) Context("cancel installation", func() { It("cancel running installation", func() { c := installCluster(clusterID) - waitForHostState(ctx, models.HostStatusInstalling, defaultWaitForHostStateTimeout, c.Hosts...) - _, err := userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + waitForHostState(ctx, models.HostStatusInstalling, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) 
+ _, err := utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - waitForClusterState(ctx, clusterID, models.ClusterStatusCancelled, defaultWaitForClusterStateTimeout, clusterCanceledInfo) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + waitForClusterState(ctx, clusterID, models.ClusterStatusCancelled, utils_test.DefaultWaitForClusterStateTimeout, clusterCanceledInfo) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusCancelled)) @@ -2558,9 +2559,9 @@ name: exampleNamespace2` Expect(c.InstallCompletedAt).Should(Equal(c.StatusUpdatedAt)) }) It("cancel installation conflicts", func() { - _, err := userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).To(BeAssignableToTypeOf(installer.NewV2CancelInstallationConflict())) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusReady)) @@ -2568,17 +2569,17 @@ name: exampleNamespace2` It("cancel failed cluster", func() { By("verify cluster is in error") FailCluster(ctx, clusterID, *infraEnvID, masterFailure) - waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, 
clusterID, models.ClusterStatusError, utils_test.DefaultWaitForClusterStateTimeout, clusterErrorInfo) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).ShouldNot(HaveOccurred()) c := rep.GetPayload() Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusError)) - waitForHostState(ctx, models.HostStatusError, defaultWaitForHostStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusError, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) By("cancel installation, check cluster and hosts statuses") - _, err = userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).ShouldNot(HaveOccurred()) - rep, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).ShouldNot(HaveOccurred()) c = rep.GetPayload() Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusCancelled)) @@ -2590,57 +2591,57 @@ name: exampleNamespace2` c := installCluster(clusterID) Expect(len(c.Hosts)).Should(Equal(5)) - updateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, "Installing") - updateProgress(*c.Hosts[1].ID, c.Hosts[1].InfraEnvID, "Done") + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, "Installing") + utils_test.TestContext.UpdateProgress(*c.Hosts[1].ID, c.Hosts[1].InfraEnvID, "Done") - h1 := getHostV2(*infraEnvID, *c.Hosts[0].ID) + h1 := utils_test.TestContext.GetHostV2(*infraEnvID, *c.Hosts[0].ID) 
Expect(*h1.Status).Should(Equal(models.HostStatusInstallingInProgress)) - h2 := getHostV2(*infraEnvID, *c.Hosts[1].ID) + h2 := utils_test.TestContext.GetHostV2(*infraEnvID, *c.Hosts[1].ID) Expect(*h2.Status).Should(Equal(models.HostStatusInstalled)) - _, err := userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).ShouldNot(HaveOccurred()) - waitForHostState(ctx, models.HostStatusCancelled, defaultWaitForClusterStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusCancelled, utils_test.DefaultWaitForClusterStateTimeout, c.Hosts...) }) It("cancel host - wrong boot order", func() { c := installCluster(clusterID) hostID := c.Hosts[0].ID - Expect(isStepTypeInList(getNextSteps(*infraEnvID, *hostID), models.StepTypeInstall)).Should(BeTrue()) - updateProgress(*hostID, *infraEnvID, models.HostStageRebooting) + Expect(utils_test.IsStepTypeInList(utils_test.TestContext.GetNextSteps(*infraEnvID, *hostID), models.StepTypeInstall)).Should(BeTrue()) + utils_test.TestContext.UpdateProgress(*hostID, *infraEnvID, models.HostStageRebooting) - _, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: hostID, }, }) Expect(err).ShouldNot(HaveOccurred()) - hostInDb := getHostV2(*infraEnvID, *hostID) + hostInDb := utils_test.TestContext.GetHostV2(*infraEnvID, *hostID) Expect(*hostInDb.Status).Should(Equal(models.HostStatusInstallingPendingUserAction)) waitForClusterState( ctx, clusterID, models.ClusterStatusInstallingPendingUserAction, - defaultWaitForClusterStateTimeout, + utils_test.DefaultWaitForClusterStateTimeout, 
clusterInstallingPendingUserActionStateInfo) - _, err = userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).ShouldNot(HaveOccurred()) - waitForHostState(ctx, models.HostStatusCancelled, defaultWaitForHostStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusCancelled, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) }) It("cancel installation - cluster in finalizing status", func() { setClusterAsFinalizing(ctx, clusterID) - _, err := userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).ShouldNot(HaveOccurred()) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() Expect(c).NotTo(BeNil()) - waitForHostState(ctx, models.HostStatusCancelled, defaultWaitForHostStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusCancelled, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) 
}) }) Context("reset installation", func() { @@ -2658,15 +2659,15 @@ name: exampleNamespace2` It("reset cluster and register hosts", func() { By("verify reset success") installCluster(clusterID) - _, err := userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) By("verify cluster state") - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInsufficient)) @@ -2680,27 +2681,27 @@ name: exampleNamespace2` for i, host := range c.Hosts { if enableReset { Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusResetting)) - steps := getNextSteps(*infraEnvID, *host.ID) + steps := utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) Expect(len(steps.Instructions)).Should(Equal(0)) } else { - waitForHostState(ctx, models.HostStatusResettingPendingUserAction, defaultWaitForHostStateTimeout, host) + waitForHostState(ctx, models.HostStatusResettingPendingUserAction, 
utils_test.DefaultWaitForHostStateTimeout, host) } verifyHostProgressReset(host.Progress) - _, err = agentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: host.ID, }, }) Expect(err).ShouldNot(HaveOccurred()) - waitForHostState(ctx, models.HostStatusDiscovering, defaultWaitForHostStateTimeout, host) - generateEssentialHostSteps(ctx, host, fmt.Sprintf("host-after-reset-%d", i), ips[i]) + waitForHostState(ctx, models.HostStatusDiscovering, utils_test.DefaultWaitForHostStateTimeout, host) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, fmt.Sprintf("host-after-reset-%d", i), ips[i]) } generateFullMeshConnectivity(ctx, ips[0], c.Hosts...) for _, host := range c.Hosts { - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, host) - host = getHostV2(*infraEnvID, *host.ID) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, host) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.Progress.CurrentStage).Should(Equal(models.HostStage(""))) Expect(host.Progress.ProgressInfo).Should(Equal("")) Expect(host.Bootstrap).Should(Equal(false)) @@ -2712,11 +2713,11 @@ name: exampleNamespace2` By("verify reset success") installCluster(clusterID) - _, err := userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) 
Expect(err).NotTo(HaveOccurred()) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() for _, h := range c.Hosts { @@ -2728,7 +2729,7 @@ name: exampleNamespace2` Expect(bootstrapID).ShouldNot(Equal(nil)) By("verify cluster state") - rep, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInsufficient)) @@ -2737,32 +2738,32 @@ name: exampleNamespace2` ips := hostutil.GenerateIPv4Addresses(len(c.Hosts), defaultCIDRv4) for i, host := range c.Hosts { Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusResetting)) - steps := getNextSteps(*infraEnvID, *host.ID) + steps := utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) Expect(len(steps.Instructions)).Should(Equal(0)) - _, err = agentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: host.ID, }, }) Expect(err).ShouldNot(HaveOccurred()) - waitForHostState(ctx, models.HostStatusDiscovering, defaultWaitForHostStateTimeout, host) - generateEssentialHostSteps(ctx, host, fmt.Sprintf("host-after-reset-%d", i), ips[i]) + waitForHostState(ctx, models.HostStatusDiscovering, utils_test.DefaultWaitForHostStateTimeout, host) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, fmt.Sprintf("host-after-reset-%d", i), ips[i]) } generateFullMeshConnectivity(ctx, 
ips[0], c.Hosts...) for _, host := range c.Hosts { - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, host) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, host) if host.Bootstrap { - _, err = userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: host.InfraEnvID, HostID: *host.ID, }) Expect(err).NotTo(HaveOccurred()) } } - h := registerNode(ctx, *infraEnvID, "hostname", defaultCIDRv4) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + h := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "hostname", defaultCIDRv4) + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -2772,7 +2773,7 @@ name: exampleNamespace2` Expect(err).NotTo(HaveOccurred()) By("check for a new bootstrap") - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, clusterReadyStateInfo) c = installCluster(clusterID) for _, h := range c.Hosts { @@ -2784,20 +2785,20 @@ name: exampleNamespace2` } }) It("reset ready/installing cluster", func() { - _, err := userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) Expect(err).To(BeAssignableToTypeOf(installer.NewV2ResetClusterConflict())) c := installCluster(clusterID) - waitForHostState(ctx, models.HostStatusInstalling, defaultWaitForHostStateTimeout, c.Hosts...) 
- _, err = userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) + waitForHostState(ctx, models.HostStatusInstalling, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() for _, host := range c.Hosts { if enableReset { Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusResetting)) } else { - waitForHostState(ctx, models.HostStatusResettingPendingUserAction, defaultWaitForHostStateTimeout, host) + waitForHostState(ctx, models.HostStatusResettingPendingUserAction, utils_test.DefaultWaitForHostStateTimeout, host) } } }) @@ -2806,52 +2807,52 @@ name: exampleNamespace2` Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInstalling)) Expect(len(c.Hosts)).Should(Equal(5)) - updateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, "Installing") - updateProgress(*c.Hosts[1].ID, c.Hosts[1].InfraEnvID, "Done") + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, "Installing") + utils_test.TestContext.UpdateProgress(*c.Hosts[1].ID, c.Hosts[1].InfraEnvID, "Done") - h1 := getHostV2(*infraEnvID, *c.Hosts[0].ID) + h1 := utils_test.TestContext.GetHostV2(*infraEnvID, *c.Hosts[0].ID) Expect(*h1.Status).Should(Equal(models.HostStatusInstallingInProgress)) - h2 := getHostV2(*infraEnvID, *c.Hosts[1].ID) + h2 := utils_test.TestContext.GetHostV2(*infraEnvID, *c.Hosts[1].ID) Expect(*h2.Status).Should(Equal(models.HostStatusInstalled)) - _, err := userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: 
clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - waitForHostState(ctx, models.HostStatusResettingPendingUserAction, defaultWaitForClusterStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusResettingPendingUserAction, utils_test.DefaultWaitForClusterStateTimeout, c.Hosts...) }) It("reset cluster - wrong boot order", func() { c := installCluster(clusterID) Expect(len(c.Hosts)).Should(Equal(5)) - updateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, models.HostStageRebooting) - _, err := userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, models.HostStageRebooting) + _, err := utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, clusterResetStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, clusterResetStateInfo) for _, host := range c.Hosts { - waitForHostState(ctx, models.HostStatusResettingPendingUserAction, defaultWaitForHostStateTimeout, host) - _, err = agentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ + waitForHostState(ctx, models.HostStatusResettingPendingUserAction, utils_test.DefaultWaitForHostStateTimeout, host) + _, err = utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(ctx, 
&installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: host.ID, }, }) Expect(err).ShouldNot(HaveOccurred()) - waitForHostState(ctx, models.HostStatusDiscovering, defaultWaitForHostStateTimeout, host) + waitForHostState(ctx, models.HostStatusDiscovering, utils_test.DefaultWaitForHostStateTimeout, host) } }) It("reset installation - cluster in finalizing status", func() { setClusterAsFinalizing(ctx, clusterID) - _, err := userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) Expect(err).ShouldNot(HaveOccurred()) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() Expect(c).NotTo(BeNil()) - waitForHostState(ctx, models.HostStatusResettingPendingUserAction, defaultWaitForHostStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusResettingPendingUserAction, utils_test.DefaultWaitForHostStateTimeout, c.Hosts...) 
}) It("reset cluster doesn't delete user generated manifests", func() { @@ -2871,7 +2872,7 @@ spec: ManifestSource: constants.ManifestSourceUserSupplied, } // All manifests created via the API are considered to be "user generated" - response, err := userBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ ClusterID: clusterID, CreateManifestParams: &models.CreateManifestParams{ Content: &base64Content, @@ -2886,20 +2887,20 @@ spec: Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInstalling)) Expect(len(c.Hosts)).Should(Equal(5)) - updateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, "Installing") - updateProgress(*c.Hosts[1].ID, c.Hosts[1].InfraEnvID, "Done") + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, c.Hosts[0].InfraEnvID, "Installing") + utils_test.TestContext.UpdateProgress(*c.Hosts[1].ID, c.Hosts[1].InfraEnvID, "Done") - h1 := getHostV2(*infraEnvID, *c.Hosts[0].ID) + h1 := utils_test.TestContext.GetHostV2(*infraEnvID, *c.Hosts[0].ID) Expect(*h1.Status).Should(Equal(models.HostStatusInstallingInProgress)) - h2 := getHostV2(*infraEnvID, *c.Hosts[1].ID) + h2 := utils_test.TestContext.GetHostV2(*infraEnvID, *c.Hosts[1].ID) Expect(*h2.Status).Should(Equal(models.HostStatusInstalled)) - _, err = userBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetCluster(ctx, &installer.V2ResetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - waitForHostState(ctx, models.HostStatusResettingPendingUserAction, defaultWaitForClusterStateTimeout, c.Hosts...) + waitForHostState(ctx, models.HostStatusResettingPendingUserAction, utils_test.DefaultWaitForClusterStateTimeout, c.Hosts...) 
// verify manifest remains after cluster reset - response2, err := userBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ + response2, err := utils_test.TestContext.UserBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ ClusterID: *cluster.ID, }) Expect(err).ShouldNot(HaveOccurred()) @@ -2907,9 +2908,9 @@ spec: }) AfterEach(func() { - reply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - Expect(reply.GetPayload().OpenshiftClusterID).To(Equal(*strToUUID(""))) + Expect(reply.GetPayload().OpenshiftClusterID).To(Equal(*utils_test.StrToUUID(""))) }) }) }) @@ -2917,7 +2918,7 @@ spec: Context("NoProxy with Wildcard", func() { It("OpenshiftVersion does support NoProxy wildcard", func() { - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -2926,7 +2927,7 @@ spec: OpenshiftVersion: &openshiftVersion, NoProxy: swag.String("*"), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(false), NetworkType: swag.String("OVNKubernetes"), HighAvailabilityMode: swag.String(models.ClusterHighAvailabilityModeNone), @@ -2938,21 +2939,21 @@ spec: It("install cluster requirement", func() { clusterID := *cluster.ID - waitForClusterState(ctx, clusterID, models.ClusterStatusPendingForInput, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, 
models.ClusterStatusPendingForInput, utils_test.DefaultWaitForClusterStateTimeout, clusterPendingForInputStateInfo) checkUpdateAtWhileStatic(ctx, clusterID) - hosts, ips := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + hosts, ips := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) newIPs := hostutil.GenerateIPv4Addresses(2, ips[2]) - h4 := &registerHost(*infraEnvID).Host - h5 := registerNode(ctx, *infraEnvID, "h5", newIPs[0]) + h4 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + h5 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h5", newIPs[0]) apiVip := "1.2.3.5" ingressVip := "1.2.3.6" By("Two hosts are masters, one host is without role -> state must be insufficient") - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -2960,7 +2961,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -2969,7 +2970,7 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, @@ -2977,10 +2978,10 @@ spec: ClusterID: clusterID, }) Expect(err).NotTo(HaveOccurred()) -
waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) // add host and 2 workers (h4 has no inventory) --> insufficient state due to single worker - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -2988,7 +2989,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -2996,7 +2997,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3004,13 +3005,13 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) // update host4 again (now it has inventory) -> state must be ready - generateEssentialHostSteps(ctx, h4, "h4", newIPs[1]) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h4, "h4", 
newIPs[1]) // update role for the host4 to master -> state must be ready generateFullMeshConnectivity(ctx, ips[0], hosts[0], hosts[1], hosts[2], h4, h5) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3025,16 +3026,16 @@ spec: clusterID := *cluster.ID waitForClusterState(ctx, clusterID, models.ClusterStatusPendingForInput, 60*time.Second, clusterPendingForInputStateInfo) ips := hostutil.GenerateIPv4Addresses(6, defaultCIDRv4) - wh1 := registerNode(ctx, *infraEnvID, "wh1", ips[0]) - wh2 := registerNode(ctx, *infraEnvID, "wh2", ips[1]) - wh3 := registerNode(ctx, *infraEnvID, "wh3", ips[2]) + wh1 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "wh1", ips[0]) + wh2 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "wh2", ips[1]) + wh3 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "wh3", ips[2]) generateFullMeshConnectivity(ctx, ips[0], wh1, wh2, wh3) apiVip := "1.2.3.5" ingressVip := "1.2.3.6" By("All hosts are workers -> state must be insufficient") - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3042,7 +3043,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3050,7 +3051,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - 
_, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3058,7 +3059,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, @@ -3067,8 +3068,8 @@ spec: ClusterID: clusterID, }) Expect(err).NotTo(HaveOccurred()) - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) - clusterReply, getErr := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) + clusterReply, getErr := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ ClusterID: clusterID, }) Expect(getErr).ToNot(HaveOccurred()) @@ -3078,12 +3079,12 @@ spec: Expect(len(clusterReply.Payload.HostNetworks)).To(Equal(1)) Expect(clusterReply.Payload.HostNetworks[0].Cidr).To(Equal("1.2.3.0/24")) - mh1 := registerNode(ctx, *infraEnvID, "mh1", ips[3]) - generateFAPostStepReply(ctx, mh1, validFreeAddresses) - mh2 := registerNode(ctx, *infraEnvID, "mh2", ips[4]) - mh3 := registerNode(ctx, *infraEnvID, "mh3", ips[5]) + mh1 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "mh1", ips[3]) + utils_test.TestContext.GenerateFAPostStepReply(ctx, mh1, validFreeAddresses) + mh2 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, 
"mh2", ips[4]) + mh3 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "mh3", ips[5]) generateFullMeshConnectivity(ctx, ips[0], mh1, mh2, mh3, wh1, wh2, wh3) - clusterReply, _ = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ + clusterReply, _ = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ ClusterID: clusterID, }) @@ -3100,7 +3101,7 @@ spec: Expect(*mh3.ID).To(BeElementOf(hids...)) By("Only two masters -> state must be insufficient") - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3108,7 +3109,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3116,7 +3117,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3124,10 +3125,10 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, clusterInsufficientStateInfo) By("Three master hosts -> state must be ready") - _, 
err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3136,11 +3137,11 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, mh3) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, clusterReadyStateInfo) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, mh3) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, clusterReadyStateInfo) By("Back to two master hosts -> state must be insufficient") - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3149,12 +3150,12 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - cluster = getCluster(clusterID) + cluster = utils_test.TestContext.GetCluster(clusterID) Expect(swag.StringValue(cluster.Status)).Should(Equal(models.ClusterStatusInsufficient)) Expect(swag.StringValue(cluster.StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) By("Three master hosts -> state must be ready") - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3163,11 +3164,11 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, mh3) + waitForHostState(ctx, 
models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, mh3) waitForClusterState(ctx, clusterID, "ready", 60*time.Second, clusterReadyStateInfo) By("Back to two master hosts -> state must be insufficient") - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3176,14 +3177,14 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - cluster = getCluster(clusterID) + cluster = utils_test.TestContext.GetCluster(clusterID) Expect(swag.StringValue(cluster.Status)).Should(Equal(models.ClusterStatusInsufficient)) Expect(swag.StringValue(cluster.StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).To(BeAssignableToTypeOf(installer.NewV2GetClusterNotFound())) }) @@ -3205,11 +3206,11 @@ spec: SystemVendor: &models.SystemVendor{Manufacturer: "manu", ProductName: "prod", SerialNumber: "3534"}, Routes: common.TestDefaultRouteConfiguration, } - h1 := &registerHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, h1, "h1", hwInfo) + h1 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h1, "h1", hwInfo) apiVip := "1.2.3.8" ingressVip := "1.2.3.9" - _, err := userBMClient.Installer.V2UpdateCluster(ctx,
&installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, @@ -3221,14 +3222,14 @@ spec: By("Register 3 more hosts with valid hw info") ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) - h2 := registerNode(ctx, *infraEnvID, "h2", ips[0]) - h3 := registerNode(ctx, *infraEnvID, "h3", ips[1]) - h4 := registerNode(ctx, *infraEnvID, "h4", ips[2]) + h2 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h2", ips[0]) + h3 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h3", ips[1]) + h4 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h4", ips[2]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForClusterStateTimeout, h1) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForClusterStateTimeout, h1) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3236,7 +3237,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3244,7 +3245,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, 
&installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3252,7 +3253,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3262,7 +3263,7 @@ spec: Expect(err).NotTo(HaveOccurred()) By("validate that host 1 is insufficient") - waitForHostState(ctx, models.HostStatusInsufficient, defaultWaitForClusterStateTimeout, h1) + waitForHostState(ctx, models.HostStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, h1) }) It("install_cluster with edge worker", func() { @@ -3293,17 +3294,17 @@ spec: } By("Register edge worker with 16Gb disk") - h1 := &registerHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, h1, "h1", hwInfo) + h1 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h1, "h1", hwInfo) By("Register rergular worker with 16Gb disk") hwInfo.SystemVendor.ProductName = "ding dong soc" - h5 := &registerHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, h5, "h5", hwInfo) + h5 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h5, "h5", hwInfo) apiVip := "1.2.3.8" ingressVip := "1.2.3.9" - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, @@ -3315,13 +3316,13 @@ spec: By("Register 3
more hosts with valid hw info") ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) - h2 := registerNode(ctx, *infraEnvID, "h2", ips[0]) - h3 := registerNode(ctx, *infraEnvID, "h3", ips[1]) - h4 := registerNode(ctx, *infraEnvID, "h4", ips[2]) + h2 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h2", ips[0]) + h3 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h3", ips[1]) + h4 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h4", ips[2]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4, h5) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3330,7 +3331,7 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3339,7 +3340,7 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3348,7 +3349,7 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3357,7 +3358,7 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - _, err = 
userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3367,19 +3368,19 @@ spec: Expect(err).NotTo(HaveOccurred()) By("validate that host 5 that is not edge worker is insufficient") - waitForHostState(ctx, models.HostStatusInsufficient, defaultWaitForClusterStateTimeout, h5) - h5 = getHostV2(*infraEnvID, *h5.ID) + waitForHostState(ctx, models.HostStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, h5) + h5 = utils_test.TestContext.GetHostV2(*infraEnvID, *h5.ID) Expect(h5.ValidationsInfo).Should(ContainSubstring("No eligible disks were found")) By("validate that edge worker is passing the validation") - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForClusterStateTimeout, h1) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForClusterStateTimeout, h1) }) It("unique_hostname_validation", func() { clusterID := *cluster.ID //define h1 as known master - hosts, ips := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + hosts, ips := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3388,9 +3389,9 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - h1 := getHostV2(*infraEnvID, *hosts[0].ID) - h2 := getHostV2(*infraEnvID, *hosts[1].ID) - h3 := getHostV2(*infraEnvID, *hosts[2].ID) + h1 := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[0].ID) + h2 := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[1].ID) + h3 := utils_test.TestContext.GetHostV2(*infraEnvID, 
*hosts[2].ID) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) waitForHostState(ctx, "known", 60*time.Second, h1) Expect(h1.RequestedHostname).Should(Equal("h1")) @@ -3398,17 +3399,17 @@ spec: By("Registering host with same hostname") newIPs := hostutil.GenerateIPv4Addresses(2, ips[2]) //after name clash --> h1 and h4 are insufficient - h4 := registerNode(ctx, *infraEnvID, "h1", newIPs[0]) - h4 = getHostV2(*infraEnvID, *h4.ID) + h4 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h1", newIPs[0]) + h4 = utils_test.TestContext.GetHostV2(*infraEnvID, *h4.ID) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4) waitForHostState(ctx, "insufficient", 60*time.Second, h1) Expect(h4.RequestedHostname).Should(Equal("h1")) - h1 = getHostV2(*infraEnvID, *h1.ID) + h1 = utils_test.TestContext.GetHostV2(*infraEnvID, *h1.ID) Expect(*h1.Status).Should(Equal("insufficient")) By("Verifying install command") //install cluster should fail because only 2 hosts are known - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3416,7 +3417,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3424,7 +3425,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: 
swag.String(string(models.HostRoleMaster)), }, @@ -3432,7 +3433,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3441,14 +3442,14 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).Should(HaveOccurred()) By("Registering one more host with same hostname") - disabledHost := registerNode(ctx, *infraEnvID, "h1", newIPs[1]) - disabledHost = getHostV2(*infraEnvID, *disabledHost.ID) - waitForHostState(ctx, models.HostStatusInsufficient, defaultWaitForHostStateTimeout, disabledHost) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + disabledHost := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h1", newIPs[1]) + disabledHost = utils_test.TestContext.GetHostV2(*infraEnvID, *disabledHost.ID) + waitForHostState(ctx, models.HostStatusInsufficient, utils_test.DefaultWaitForHostStateTimeout, disabledHost) + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3458,22 +3459,22 @@ spec: Expect(err).NotTo(HaveOccurred()) By("Changing hostname, verify host is known now") - generateEssentialHostSteps(ctx, h4, "h4", newIPs[0]) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h4) - h4 = getHostV2(*infraEnvID, *h4.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h4, "h4", newIPs[0]) + 
waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h4) + h4 = utils_test.TestContext.GetHostV2(*infraEnvID, *h4.ID) Expect(h4.RequestedHostname).Should(Equal("h4")) By("Remove host with the same hostname and verify h1 is known") - _, err = userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: disabledHost.InfraEnvID, HostID: *disabledHost.ID, }) Expect(err).NotTo(HaveOccurred()) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h1) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h1) By("add one more worker to get 2 functioning workers") - h5 := registerNode(ctx, *infraEnvID, "h5", newIPs[1]) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + h5 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h5", newIPs[1]) + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3487,7 +3488,7 @@ spec: waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) By("Verify install after disabling the host with same hostname") - _, err = userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) }) @@ -3495,8 +3496,8 @@ spec: localhost := "localhost" clusterID := *cluster.ID - hosts, ips := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + hosts, ips := 
utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3505,19 +3506,19 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - h1 := getHostV2(*infraEnvID, *hosts[0].ID) + h1 := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[0].ID) waitForHostState(ctx, "known", 60*time.Second, h1) Expect(h1.RequestedHostname).Should(Equal("h1")) By("Changing hostname reply to localhost") - generateEssentialHostSteps(ctx, h1, localhost, ips[0]) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, localhost, ips[0]) waitForHostState(ctx, models.HostStatusInsufficient, 60*time.Second, h1) - h1Host := getHostV2(*infraEnvID, *h1.ID) + h1Host := utils_test.TestContext.GetHostV2(*infraEnvID, *h1.ID) Expect(h1Host.RequestedHostname).Should(Equal(localhost)) By("Setting hostname to valid name") hostname := "reqh0" - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostName: &hostname, }, @@ -3529,7 +3530,7 @@ spec: waitForHostState(ctx, models.HostStatusKnown, 60*time.Second, h1) By("Setting hostname to localhost should cause an API error") - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostName: &localhost, }, @@ -3566,8 +3567,8 @@ spec: It("set_requested_hostnames", func() { clusterID := *cluster.ID - hosts, ips := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + hosts, ips := 
utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3575,7 +3576,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3583,7 +3584,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleMaster)), }, @@ -3592,13 +3593,13 @@ spec: }) Expect(err).NotTo(HaveOccurred()) - h1 := getHostV2(*infraEnvID, *hosts[0].ID) - h2 := getHostV2(*infraEnvID, *hosts[1].ID) - h3 := getHostV2(*infraEnvID, *hosts[2].ID) + h1 := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[0].ID) + h2 := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[1].ID) + h3 := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[2].ID) waitForHostState(ctx, models.HostStatusKnown, time.Minute, h1, h2, h3) // update requested hostnames hostname := "reqh0" - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostName: &hostname, }, @@ -3607,7 +3608,7 @@ spec: }) Expect(err).NotTo(HaveOccurred()) hostname = "reqh1" - _, err = userBMClient.Installer.V2UpdateHost(ctx, 
&installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostName: &hostname, }, @@ -3617,9 +3618,9 @@ spec: Expect(err).NotTo(HaveOccurred()) // check hostnames were updated - h1 = getHostV2(*infraEnvID, *h1.ID) - h2 = getHostV2(*infraEnvID, *h2.ID) - h3 = getHostV2(*infraEnvID, *h3.ID) + h1 = utils_test.TestContext.GetHostV2(*infraEnvID, *h1.ID) + h2 = utils_test.TestContext.GetHostV2(*infraEnvID, *h2.ID) + h3 = utils_test.TestContext.GetHostV2(*infraEnvID, *h3.ID) Expect(h1.RequestedHostname).Should(Equal("reqh0")) Expect(h2.RequestedHostname).Should(Equal("reqh1")) Expect(*h1.Status).Should(Equal(models.HostStatusKnown)) @@ -3629,23 +3630,23 @@ spec: // register new host with the same name in inventory By("Registering new host with same hostname as in node's inventory") newIPs := hostutil.GenerateIPv4Addresses(2, ips[2]) - h4 := registerNode(ctx, *infraEnvID, "h3", newIPs[0]) + h4 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h3", newIPs[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4) - h4 = getHostV2(*infraEnvID, *h4.ID) + h4 = utils_test.TestContext.GetHostV2(*infraEnvID, *h4.ID) waitForHostState(ctx, models.HostStatusInsufficient, time.Minute, h3, h4) By("Check cluster install fails on validation") - _, err = userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).Should(HaveOccurred()) By("Registering new host with same hostname as in node's requested_hostname") - h5 := registerNode(ctx, *infraEnvID, "reqh0", newIPs[1]) - h5 = getHostV2(*infraEnvID, *h5.ID) + h5 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "reqh0", newIPs[1]) + h5 = utils_test.TestContext.GetHostV2(*infraEnvID, *h5.ID) 
generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4, h5) waitForHostState(ctx, models.HostStatusInsufficient, time.Minute, h1, h5) By("Change requested hostname of an insufficient node") - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3653,7 +3654,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostName: swag.String("reqh0new"), }, @@ -3664,7 +3665,7 @@ spec: waitForHostState(ctx, models.HostStatusKnown, time.Minute, h1, h5) By("change the requested hostname of the insufficient node") - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(models.HostRoleWorker)), }, @@ -3672,7 +3673,7 @@ spec: InfraEnvID: *infraEnvID, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostName: swag.String("reqh2"), }, @@ -3683,7 +3684,7 @@ spec: waitForHostState(ctx, models.HostStatusKnown, time.Minute, h3) waitForClusterState(ctx, clusterID, models.ClusterStatusReady, time.Minute, clusterReadyStateInfo) - _, err = userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err = 
utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) }) @@ -3753,7 +3754,7 @@ var _ = Describe("Preflight Cluster Requirements", func() { BeforeEach(func() { ctx = context.Background() - cID, err := registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + cID, err := utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) clusterID = cID @@ -3762,7 +3763,7 @@ var _ = Describe("Preflight Cluster Requirements", func() { It("should be reported for cluster", func() { params := installer.V2GetPreflightRequirementsParams{ClusterID: clusterID} - response, err := userBMClient.Installer.V2GetPreflightRequirements(ctx, ¶ms) + response, err := utils_test.TestContext.UserBMClient.Installer.V2GetPreflightRequirements(ctx, ¶ms) Expect(err).ToNot(HaveOccurred()) requirements := response.GetPayload() @@ -3835,7 +3836,7 @@ var _ = Describe("Preflight Cluster Requirements for lvms", func() { } ) It("should be reported for 4.12 cluster", func() { - var cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + var cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String("4.12.0"), @@ -3848,7 +3849,7 @@ var _ = Describe("Preflight Cluster Requirements for lvms", func() { clusterID := *cluster.GetPayload().ID params := installer.V2GetPreflightRequirementsParams{ClusterID: clusterID} - response, err := userBMClient.Installer.V2GetPreflightRequirements(ctx, ¶ms) + response, err := utils_test.TestContext.UserBMClient.Installer.V2GetPreflightRequirements(ctx, ¶ms) Expect(err).ToNot(HaveOccurred()) requirements := response.GetPayload() for _, op := range requirements.Operators { @@ 
-3858,12 +3859,12 @@ var _ = Describe("Preflight Cluster Requirements for lvms", func() { Expect(*op.Requirements.Worker.Quantitative).To(BeEquivalentTo(masterLVMRequirementsBefore4_13)) } } - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) }) It("should be reported for 4.13 cluster", func() { - var cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + var cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String("4.13.0"), @@ -3876,7 +3877,7 @@ var _ = Describe("Preflight Cluster Requirements for lvms", func() { clusterID := *cluster.GetPayload().ID params := installer.V2GetPreflightRequirementsParams{ClusterID: clusterID} - response, err := userBMClient.Installer.V2GetPreflightRequirements(ctx, ¶ms) + response, err := utils_test.TestContext.UserBMClient.Installer.V2GetPreflightRequirements(ctx, ¶ms) Expect(err).ToNot(HaveOccurred()) requirements := response.GetPayload() for _, op := range requirements.Operators { @@ -3886,7 +3887,7 @@ var _ = Describe("Preflight Cluster Requirements for lvms", func() { Expect(*op.Requirements.Worker.Quantitative).To(BeEquivalentTo(masterLVMRequirements)) } } - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) }) }) @@ -3905,8 +3906,8 @@ var _ = Describe("Multiple-VIPs Support", func() { ) AfterEach(func() { - deregisterResources() - 
clearDB() + utils_test.TestContext.DeregisterResources() + utils_test.TestContext.ClearDB() }) setClusterIdForApiVips := func(apiVips []*models.APIVip, clusterID *strfmt.UUID) { @@ -3926,7 +3927,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("8.8.8.8")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("8.8.8.2")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -3934,7 +3935,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -3946,7 +3947,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::1")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("2001:db8::2")}} - reply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -3954,7 +3955,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - 
SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -3974,7 +3975,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVipv6)}, {IP: models.IP("8.8.8.7")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVipv6)}, {IP: models.IP("8.8.8.1")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -3982,7 +3983,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -3994,7 +3995,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVipv6)}, {IP: models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVipv6)}, {IP: models.IP("2001:db8::4")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4002,7 +4003,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, 
APIVips: apiVips, IngressVips: ingressVips, }, @@ -4014,7 +4015,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}, {IP: models.IP("8.8.8.3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("2001:db8::4")}, {IP: models.IP("8.8.8.4")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4022,7 +4023,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -4036,7 +4037,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("1.1.1.333")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("1.1.1.311")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4044,7 +4045,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, 
IngressVips: ingressVips, }, @@ -4057,7 +4058,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("2001:db8::4")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4065,7 +4066,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -4077,7 +4078,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4085,7 +4086,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -4097,7 +4098,7 @@ var _ = Describe("Multiple-VIPs Support", func() { 
apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP(apiVip)}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("2001:db8::3")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4105,7 +4106,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -4117,7 +4118,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP(ingressVip)}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4125,7 +4126,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -4137,7 +4138,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: 
models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::4")}} - _, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4145,7 +4146,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, APIVips: apiVips, IngressVips: ingressVips, }, @@ -4157,7 +4158,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Context("V2UpdateCluster", func() { BeforeEach(func() { - reply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4165,7 +4166,7 @@ var _ = Describe("Multiple-VIPs Support", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(dualstackVipsOpenShiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, }, }) Expect(err).NotTo(HaveOccurred()) @@ -4174,12 +4175,12 @@ var _ = Describe("Multiple-VIPs Support", func() { cluster = &common.Cluster{Cluster: *reply.Payload} infraEnvID = registerInfraEnvSpecificVersion(cluster.ID, models.ImageTypeMinimalIso, cluster.OpenshiftVersion).ID - _, _ = register3nodes(ctx, *cluster.ID, *infraEnvID, defaultCIDRv4) + _, _ = 
utils_test.TestContext.Register3nodes(ctx, *cluster.ID, *infraEnvID, defaultCIDRv4) }) AfterEach(func() { - deregisterResources() - clearDB() + utils_test.TestContext.DeregisterResources() + utils_test.TestContext.ClearDB() }) It("Two APIVips and Two ingressVips - IPv6 first and IPv4 second - negative", func() { @@ -4188,7 +4189,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("8.8.8.7")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("8.8.8.1")}} - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: apiVips, @@ -4205,7 +4206,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("2001:db8::4")}} - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: apiVips, @@ -4222,7 +4223,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}, {IP: models.IP("8.8.8.3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("2001:db8::4")}, {IP: models.IP("8.8.8.4")}} - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: 
&models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: apiVips, @@ -4239,7 +4240,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("1.1.1.333")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("1.1.1.311")}} - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: apiVips, @@ -4256,7 +4257,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(ingressVip)}, {IP: models.IP("2001:db8::4")}} - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: apiVips, @@ -4272,7 +4273,7 @@ var _ = Describe("Multiple-VIPs Support", func() { apiVips := []*models.APIVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::3")}} ingressVips := []*models.IngressVip{{IP: models.IP(apiVip)}, {IP: models.IP("2001:db8::4")}} - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: apiVips, @@ -4288,13 +4289,13 @@ var _ = Describe("Multiple-VIPs Support", func() { }) func checkUpdateAtWhileStatic(ctx context.Context, clusterID strfmt.UUID) { - clusterReply, getErr := userBMClient.Installer.V2GetCluster(ctx, 
&installer.V2GetClusterParams{ + clusterReply, getErr := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ ClusterID: clusterID, }) Expect(getErr).ToNot(HaveOccurred()) preSecondRefreshUpdatedTime := clusterReply.Payload.UpdatedAt time.Sleep(30 * time.Second) - clusterReply, getErr = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ + clusterReply, getErr = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ ClusterID: clusterID, }) Expect(getErr).ToNot(HaveOccurred()) @@ -4322,8 +4323,8 @@ func FailCluster(ctx context.Context, clusterID, infraEnvID strfmt.UUID, reason installStep := models.HostStageFailed installInfo := "because some error" - updateHostProgressWithInfo(hostID, infraEnvID, installStep, installInfo) - host := getHostV2(infraEnvID, hostID) + utils_test.TestContext.UpdateHostProgressWithInfo(hostID, infraEnvID, installStep, installInfo) + host := utils_test.TestContext.GetHostV2(infraEnvID, hostID) Expect(*host.Status).Should(Equal("error")) Expect(*host.StatusInfo).Should(Equal(fmt.Sprintf("%s - %s", installStep, installInfo))) return hostID @@ -4338,13 +4339,13 @@ var _ = Describe("cluster install, with default network params", func() { BeforeEach(func() { By("Register cluster") - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, }, }) Expect(err).NotTo(HaveOccurred()) @@ -4355,7 +4356,7 @@ var _ = Describe("cluster install, with default network params", func() { It("install cluster", 
func() { clusterID := *cluster.ID registerHostsAndSetRoles(clusterID, *infraEnvID, 5, cluster.Name, cluster.BaseDNSDomain) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() startTimeInstalling := c.InstallStartedAt @@ -4368,13 +4369,13 @@ var _ = Describe("cluster install, with default network params", func() { // fake installation completed for _, host := range c.Hosts { - updateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) } - waitForClusterState(ctx, clusterID, "finalizing", defaultWaitForClusterStateTimeout, "Finalizing cluster installation") - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) + waitForClusterState(ctx, clusterID, "finalizing", utils_test.DefaultWaitForClusterStateTimeout, "Finalizing cluster installation") + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) - rep, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() @@ -4387,7 +4388,7 @@ var _ = Describe("cluster install, with default network params", func() { It("first host", func() { clusterID := *cluster.ID registerHostsAndSetRoles(clusterID, *infraEnvID, 5, cluster.Name, cluster.BaseDNSDomain) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := 
rep.GetPayload() startTimeInstalling := c.InstallStartedAt @@ -4399,7 +4400,7 @@ var _ = Describe("cluster install, with default network params", func() { It("all hosts", func() { clusterID := *cluster.ID registerHostsAndSetRoles(clusterID, *infraEnvID, 5, cluster.Name, cluster.BaseDNSDomain) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() startTimeInstalling := c.InstallStartedAt @@ -4411,7 +4412,7 @@ var _ = Describe("cluster install, with default network params", func() { It("last host", func() { clusterID := *cluster.ID registerHostsAndSetRoles(clusterID, *infraEnvID, 5, cluster.Name, cluster.BaseDNSDomain) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() startTimeInstalling := c.InstallStartedAt @@ -4430,14 +4431,14 @@ func registerHostsAndSetRoles(clusterID, infraenvID strfmt.UUID, numHosts int, c ips := hostutil.GenerateIPv4Addresses(numHosts, defaultCIDRv4) for i := 0; i < numHosts; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, infraenvID, hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, infraenvID, hostname, ips[i]) var role models.HostRole if i < 3 { role = models.HostRoleMaster } else { role = models.HostRoleWorker } - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(role)), }, @@ -4448,17 +4449,17 @@ func 
registerHostsAndSetRoles(clusterID, infraenvID strfmt.UUID, numHosts int, c hosts = append(hosts, host) } for _, host := range hosts { - generateDomainResolution(ctx, host, clusterName, baseDNSDomain) - generateCommonDomainReply(ctx, host, clusterName, baseDNSDomain) + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterName, baseDNSDomain) + utils_test.TestContext.GenerateCommonDomainReply(ctx, host, clusterName, baseDNSDomain) } generateFullMeshConnectivity(ctx, ips[0], hosts...) - cluster := getCluster(clusterID) + cluster := utils_test.TestContext.GetCluster(clusterID) if cluster.DiskEncryption != nil && swag.StringValue(cluster.DiskEncryption.Mode) == models.DiskEncryptionModeTang { - generateTangPostStepReply(ctx, true, hosts...) + utils_test.TestContext.GenerateTangPostStepReply(ctx, true, hosts...) } if !swag.BoolValue(cluster.UserManagedNetworking) { - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: []*models.APIVip{}, @@ -4469,7 +4470,7 @@ func registerHostsAndSetRoles(clusterID, infraenvID strfmt.UUID, numHosts int, c Expect(err).NotTo(HaveOccurred()) apiVip := "1.2.3.8" ingressVip := "1.2.3.9" - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, @@ -4480,7 +4481,7 @@ func registerHostsAndSetRoles(clusterID, infraenvID strfmt.UUID, numHosts int, c Expect(err).NotTo(HaveOccurred()) } - waitForHostState(ctx, models.HostStatusKnown, 
defaultWaitForHostStateTimeout, hosts...) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, hosts...) waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) return hosts @@ -4493,14 +4494,14 @@ func registerHostsAndSetRolesTang(clusterID, infraenvID strfmt.UUID, numHosts in ips := hostutil.GenerateIPv4Addresses(numHosts, defaultCIDRv4) for i := 0; i < numHosts; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, infraenvID, hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, infraenvID, hostname, ips[i]) var role models.HostRole if i < 3 { role = models.HostRoleMaster } else { role = models.HostRoleWorker } - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(role)), }, @@ -4511,15 +4512,15 @@ func registerHostsAndSetRolesTang(clusterID, infraenvID strfmt.UUID, numHosts in hosts = append(hosts, host) } for _, host := range hosts { - generateDomainResolution(ctx, host, clusterName, baseDNSDomain) + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterName, baseDNSDomain) } generateFullMeshConnectivity(ctx, ips[0], hosts...) - cluster := getCluster(clusterID) + cluster := utils_test.TestContext.GetCluster(clusterID) if cluster.DiskEncryption != nil && swag.StringValue(cluster.DiskEncryption.Mode) == models.DiskEncryptionModeTang { - generateTangPostStepReply(ctx, tangValidated, hosts...) + utils_test.TestContext.GenerateTangPostStepReply(ctx, tangValidated, hosts...) 
} - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: []*models.APIVip{}, @@ -4530,7 +4531,7 @@ func registerHostsAndSetRolesTang(clusterID, infraenvID strfmt.UUID, numHosts in Expect(err).NotTo(HaveOccurred()) apiVip := "1.2.3.8" ingressVip := "1.2.3.9" - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, @@ -4540,7 +4541,7 @@ func registerHostsAndSetRolesTang(clusterID, infraenvID strfmt.UUID, numHosts in Expect(err).NotTo(HaveOccurred()) - waitForHostState(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, hosts...) + waitForHostState(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, hosts...) 
waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) return hosts @@ -4561,7 +4562,7 @@ func registerHostsAndSetRolesDHCP(clusterID, infraEnvID strfmt.UUID, numHosts in } b, err := json.Marshal(&r) Expect(err).ToNot(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: h.InfraEnvID, HostID: *h.ID, Reply: &models.StepReply{ @@ -4576,14 +4577,14 @@ func registerHostsAndSetRolesDHCP(clusterID, infraEnvID strfmt.UUID, numHosts in ips := hostutil.GenerateIPv4Addresses(numHosts, defaultCIDRv4) for i := 0; i < numHosts; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, infraEnvID, hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, infraEnvID, hostname, ips[i]) var role models.HostRole if i < 3 { role = models.HostRoleMaster } else { role = models.HostRoleWorker } - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(role)), }, @@ -4594,7 +4595,7 @@ func registerHostsAndSetRolesDHCP(clusterID, infraEnvID strfmt.UUID, numHosts in hosts = append(hosts, host) } generateFullMeshConnectivity(ctx, ips[0], hosts...) 
- _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ MachineNetworks: common.TestIPv4Networking.MachineNetworks, }, @@ -4603,7 +4604,7 @@ func registerHostsAndSetRolesDHCP(clusterID, infraEnvID strfmt.UUID, numHosts in Expect(err).ToNot(HaveOccurred()) for _, h := range hosts { generateDhcpStepReply(h, apiVip, ingressVip) - generateDomainResolution(ctx, h, clusterName, baseDNSDomain) + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterName, baseDNSDomain) } waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) @@ -4633,7 +4634,7 @@ func getClusterWorkers(c *models.Cluster) (workers []*models.Host) { func generateConnectivityPostStepReply(ctx context.Context, h *models.Host, connectivityReport *models.ConnectivityReport) { fa, err := json.Marshal(connectivityReport) Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: h.InfraEnvID, HostID: *h.ID, Reply: &models.StepReply{ @@ -4722,7 +4723,7 @@ var _ = Describe("Cluster registration default", func() { serviceCIDR = "172.30.0.0/16" ) It("RegisterCluster", func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4730,7 +4731,7 @@ var _ = Describe("Cluster registration default", func() { Name: 
swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, }, }) Expect(err).NotTo(HaveOccurred()) @@ -4753,7 +4754,7 @@ var _ = Describe("Installation progress", func() { By("register cluster", func() { // register cluster - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -4761,7 +4762,7 @@ var _ = Describe("Installation progress", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, NetworkType: swag.String(models.ClusterCreateParamsNetworkTypeOpenShiftSDN), VipDhcpAllocation: swag.Bool(true), }, @@ -4776,7 +4777,7 @@ var _ = Describe("Installation progress", func() { // add OLM operators - updateClusterReply, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + updateClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterID: *c.ID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ @@ -4802,36 +4803,36 @@ var _ = Describe("Installation progress", func() { // intermediate report for _, h := range c.Hosts { - updateProgress(*h.ID, h.InfraEnvID, models.HostStageWritingImageToDisk) + utils_test.TestContext.UpdateProgress(*h.ID, h.InfraEnvID, models.HostStageWritingImageToDisk) } - c = getCluster(*c.ID) + c = utils_test.TestContext.GetCluster(*c.ID) 
expectProgressToBeInRange(c, []int{100, 100}, []int{1, 50}, []int{0, 0}) // last report for _, h := range c.Hosts { - updateProgress(*h.ID, h.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*h.ID, h.InfraEnvID, models.HostStageDone) } - c = getCluster(*c.ID) + c = utils_test.TestContext.GetCluster(*c.ID) expectProgressToBe(c, 100, 100, 0) }) By("finalizing stage - report operators' progress", func() { - waitForClusterState(ctx, *c.ID, models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) + waitForClusterState(ctx, *c.ID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) - v2ReportMonitoredOperatorStatus(ctx, agentBMClient, *c.ID, operators.OperatorConsole.Name, models.OperatorStatusAvailable, "") - c = getCluster(*c.ID) + utils_test.TestContext.V2ReportMonitoredOperatorStatus(ctx, *c.ID, operators.OperatorConsole.Name, models.OperatorStatusAvailable, "") + c = utils_test.TestContext.GetCluster(*c.ID) expectProgressToBe(c, 100, 100, 33) - v2ReportMonitoredOperatorStatus(ctx, agentBMClient, *c.ID, lso.Operator.Name, models.OperatorStatusAvailable, "") - c = getCluster(*c.ID) + utils_test.TestContext.V2ReportMonitoredOperatorStatus(ctx, *c.ID, lso.Operator.Name, models.OperatorStatusAvailable, "") + c = utils_test.TestContext.GetCluster(*c.ID) expectProgressToBe(c, 100, 100, 66) - v2ReportMonitoredOperatorStatus(ctx, agentBMClient, *c.ID, odf.Operator.Name, models.OperatorStatusFailed, "") - c = getCluster(*c.ID) + utils_test.TestContext.V2ReportMonitoredOperatorStatus(ctx, *c.ID, odf.Operator.Name, models.OperatorStatusFailed, "") + c = utils_test.TestContext.GetCluster(*c.ID) expectProgressToBe(c, 100, 100, 100) }) }) @@ -4847,12 +4848,12 @@ var _ = Describe("disk encryption", func() { Context("DiskEncryption mode: "+models.DiskEncryptionModeTpmv2, func() { BeforeEach(func() { - registerClusterReply, err := 
userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, BaseDNSDomain: "example.com", DiskEncryption: &models.DiskEncryption{ EnableOn: swag.String(models.DiskEncryptionEnableOnAll), @@ -4878,16 +4879,16 @@ var _ = Describe("disk encryption", func() { It("happy flow", func() { registerHostsAndSetRolesDHCP(*c.ID, *infraEnvID, 3, "test-cluster", "example.com") - reply, err := userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: *c.ID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: *c.ID}) Expect(err).NotTo(HaveOccurred()) c = reply.GetPayload() - generateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) - waitForLastInstallationCompletionStatus(*c.ID, models.LastInstallationPreparationStatusSuccess) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) 
+ utils_test.TestContext.WaitForLastInstallationCompletionStatus(*c.ID, models.LastInstallationPreparationStatusSuccess) }) It("host doesn't have minimal requirements for disk-encryption, TPM mode", func() { - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host nonValidTPMHwInfo := &models.Inventory{ CPU: &models.CPU{Count: 16}, Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB), UsableBytes: int64(32 * units.GiB)}, @@ -4905,11 +4906,11 @@ var _ = Describe("disk encryption", func() { Routes: common.TestDefaultRouteConfiguration, TpmVersion: models.InventoryTpmVersionNr12, } - generateEssentialHostStepsWithInventory(ctx, h, "test-host", nonValidTPMHwInfo) + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h, "test-host", nonValidTPMHwInfo) time.Sleep(60 * time.Second) waitForHostState(ctx, models.HostStatusInsufficient, 60*time.Second, h) - h = getHostV2(*infraEnvID, *h.ID) + h = utils_test.TestContext.GetHostV2(*infraEnvID, *h.ID) Expect(*h.StatusInfo).Should(ContainSubstring("The host's TPM version is not supported")) }) }) @@ -4918,13 +4919,13 @@ var _ = Describe("disk encryption", func() { BeforeEach(func() { tangServers := `[{"URL":"http://tang.example.com:7500","Thumbprint":"PLjNyRdGw03zlRoGjQYMahSZGu9"}]` - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(true), DiskEncryption: &models.DiskEncryption{ EnableOn: swag.String(models.DiskEncryptionEnableOnAll), @@ -4949,7 +4950,7 @@ var _ = 
Describe("disk encryption", func() { It("install cluster - happy flow", func() { clusterID := *c.ID registerHostsAndSetRolesTang(clusterID, *infraEnvID, 5, c.Name, c.BaseDNSDomain, true) - rep, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() startTimeInstalling := c.InstallStartedAt @@ -4962,13 +4963,13 @@ var _ = Describe("disk encryption", func() { // fake installation completed for _, host := range c.Hosts { - updateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) } - waitForClusterState(ctx, clusterID, "finalizing", defaultWaitForClusterStateTimeout, "Finalizing cluster installation") - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) + waitForClusterState(ctx, clusterID, "finalizing", utils_test.DefaultWaitForClusterStateTimeout, "Finalizing cluster installation") + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) - rep, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + rep, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c = rep.GetPayload() @@ -4996,14 +4997,14 @@ var _ = Describe("disk encryption", func() { TpmVersion: models.InventoryTpmVersionNr20, } - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host - generateEssentialHostStepsWithInventory(ctx, h, "test-host", inventoryBMInfo) - generateTangPostStepReply(ctx, false, h) + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, h, "test-host", inventoryBMInfo) + 
utils_test.TestContext.GenerateTangPostStepReply(ctx, false, h) time.Sleep(60 * time.Second) waitForHostState(ctx, models.HostStatusInsufficient, 60*time.Second, h) - h = getHostV2(*infraEnvID, *h.ID) + h = utils_test.TestContext.GetHostV2(*infraEnvID, *h.ID) Expect(*h.StatusInfo).Should(ContainSubstring("Could not validate that all Tang servers are reachable and working")) }) }) @@ -5026,7 +5027,7 @@ var _ = Describe("Verify install-config manifest", func() { defer os.Remove(file.Name()) By("Download install-config.yaml") - _, err = userBMClient.Installer.V2DownloadClusterFiles(ctx, + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadClusterFiles(ctx, &installer.V2DownloadClusterFilesParams{ ClusterID: clusterID, FileName: "install-config.yaml", @@ -5045,7 +5046,7 @@ var _ = Describe("Verify install-config manifest", func() { } getInstallConfigFromDB := func() map[string]interface{} { - response, err := userBMClient.Installer.V2GetClusterInstallConfig(ctx, + response, err := utils_test.TestContext.UserBMClient.Installer.V2GetClusterInstallConfig(ctx, &installer.V2GetClusterInstallConfigParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) @@ -5083,7 +5084,7 @@ var _ = Describe("Verify install-config manifest", func() { By("Validate 'sshKey'") sshKey, ok := installConfig["sshKey"].(string) Expect(ok).To(Equal(true)) - Expect(sshKey).To(Equal(sshPublicKey)) + Expect(sshKey).To(Equal(utils_test.SshPublicKey)) By("Validate 'networking'") networking, ok := installConfig["networking"].(map[interface{}]interface{}) @@ -5121,7 +5122,7 @@ var _ = Describe("Verify install-config manifest", func() { } installCluster := func(platformType models.PlatformType, overrideInstallConfig bool) { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: 
&models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -5129,7 +5130,7 @@ var _ = Describe("Verify install-config manifest", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(SDNNetworkTypeOpenshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, NetworkType: swag.String(models.ClusterCreateParamsNetworkTypeOpenShiftSDN), Platform: &models.Platform{Type: common.PlatformTypePtr(platformType)}, }, @@ -5144,7 +5145,7 @@ var _ = Describe("Verify install-config manifest", func() { ClusterID: clusterID, InstallConfigParams: `{"fips": true}`, } - _, err = userBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) Expect(err).To(BeNil()) } @@ -5156,12 +5157,12 @@ var _ = Describe("Verify install-config manifest", func() { setClusterAsFinalizing(ctx, clusterID) // Completing cluster installation - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) } AfterEach(func() { - deregisterResources() - clearDB() + utils_test.TestContext.DeregisterResources() + utils_test.TestContext.ClearDB() }) DescribeTable("Validate install-config content", func( @@ -5186,7 +5187,7 @@ var _ = Describe("Verify role assignment for non-standard HA OCP Control Plane c var ctx = context.TODO() It("with 4 masters, 1 worker", func() { - reply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ Context: ctx, NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), @@ -5207,11 +5208,11 @@ var _ = Describe("Verify role assignment 
for non-standard HA OCP Control Plane c ips := hostutil.GenerateIPv4Addresses(5, defaultCIDRv4) for k := 0; k < 5; k++ { - registerNodeWithInventory(ctx, *infraEnv.ID, fmt.Sprintf("host-%d", k), ips[0], getDefaultInventory(defaultCIDRv4)) + utils_test.TestContext.RegisterNodeWithInventory(ctx, *infraEnv.ID, fmt.Sprintf("host-%d", k), ips[0], utils_test.GetDefaultInventory(defaultCIDRv4)) } Eventually(func() bool { - reply, err := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: *cluster.ID}) if err != nil { return false } diff --git a/subsystem/cluster_v2_test.go b/subsystem/cluster_v2_test.go index ea7e77118d7..87831428f39 100644 --- a/subsystem/cluster_v2_test.go +++ b/subsystem/cluster_v2_test.go @@ -16,6 +16,7 @@ import ( "github.com/openshift/assisted-service/internal/host/hostutil" "github.com/openshift/assisted-service/internal/usage" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("Cluster UI Settings", func() { @@ -24,7 +25,7 @@ var _ = Describe("Cluster UI Settings", func() { ) ctx := context.Background() BeforeEach(func() { - response, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + response, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -37,13 +38,13 @@ var _ = Describe("Cluster UI Settings", func() { }) It("Should be able to store and retrieve cluster UI settings", func() { - _, err := userBMClient.Installer.V2UpdateClusterUISettings(ctx, &installer.V2UpdateClusterUISettingsParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateClusterUISettings(ctx, 
&installer.V2UpdateClusterUISettingsParams{ ClusterID: clusterId, UISettings: "{\"foo\":\"bar\"}", }) Expect(err).ToNot(HaveOccurred()) By("Should be able to retrieve cluster UI settings", func() { - response, err := userBMClient.Installer.V2GetClusterUISettings(ctx, &installer.V2GetClusterUISettingsParams{ + response, err := utils_test.TestContext.UserBMClient.Installer.V2GetClusterUISettings(ctx, &installer.V2GetClusterUISettingsParams{ ClusterID: clusterId, }) Expect(err).ToNot(HaveOccurred()) @@ -61,7 +62,7 @@ var _ = Describe("[V2ClusterTests]", func() { var h1, h2, h3 *models.Host BeforeEach(func() { - clusterReq, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + clusterReq, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -83,53 +84,53 @@ var _ = Describe("[V2ClusterTests]", func() { By("register h2 h3 to cluster via the bound infraEnv") ips = hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) - h2 = registerNode(ctx, boundInfraEnv, "h2", ips[1]) - h3 = registerNode(ctx, boundInfraEnv, "h3", ips[2]) - v2UpdateVipParams(ctx, clusterID) - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, + h2 = utils_test.TestContext.RegisterNode(ctx, boundInfraEnv, "h2", ips[1]) + h3 = utils_test.TestContext.RegisterNode(ctx, boundInfraEnv, "h3", ips[2]) + utils_test.TestContext.V2UpdateVipParams(ctx, clusterID) + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) It("Bind/Unbind single host in 3 nodes cluster (standalone InfraEnv)", func() { //register node to the InfraEnv and get its inventory By("register h1 with the stand-alone InfraEnv") - h1 = ®isterHost(infraEnvID).Host - generateHWPostStepReply(ctx, 
h1, getDefaultInventory(ips[0]), "h1") - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, h1) + h1 = &utils_test.TestContext.RegisterHost(infraEnvID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, h1) //bind the 3rd node and re-register it By("bind h1 to cluster") - bindHost(infraEnvID, *h1.ID, clusterID) - waitForHostStateV2(ctx, models.HostStatusBinding, defaultWaitForHostStateTimeout, h1) + utils_test.TestContext.BindHost(infraEnvID, *h1.ID, clusterID) + waitForHostStateV2(ctx, models.HostStatusBinding, utils_test.DefaultWaitForHostStateTimeout, h1) By("register h1 again and define the connectivity to the other hosts") - h1 = ®isterHostByUUID(h1.InfraEnvID, *h1.ID).Host + h1 = &utils_test.TestContext.RegisterHostByUUID(h1.InfraEnvID, *h1.ID).Host - generateEssentialHostSteps(ctx, h1, "h1", ips[0]) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "h1", ips[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - waitForHostStateV2(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h1) By("cluster is ready") - generateEssentialPrepareForInstallationSteps(ctx, h1) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("verify host name is set") - h1 = getHostV2(infraEnvID, *h1.ID) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(h1.RequestedHostname).To(Equal("h1")) By("unbind host and re-register h1 --> cluster return to insufficient") - unbindHost(infraEnvID, *h1.ID) - h1 = 
®isterHost(infraEnvID).Host - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, h1) + utils_test.TestContext.UnbindHost(infraEnvID, *h1.ID) + h1 = &utils_test.TestContext.RegisterHost(infraEnvID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, h1) By("verify that the cluster status is updated immediately") - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(*c.Status).To(Equal(models.ClusterStatusInsufficient)) By("verify that the unbound host still retains its name and disks count") - h1 = getHostV2(infraEnvID, *h1.ID) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(h1.RequestedHostname).To(Equal(("h1"))) var inventory models.Inventory _ = json.Unmarshal([]byte(h1.Inventory), &inventory) @@ -139,22 +140,22 @@ var _ = Describe("[V2ClusterTests]", func() { It("register single host in 3 nodes cluster (bound InfraEnv)", func() { //register node to the InfraEnv and get its inventory By("register h1 with the bound InfraEnv (implicit binding)") - h1 = ®isterHost(boundInfraEnv).Host - host := getHostV2(boundInfraEnv, *h1.ID) + h1 = &utils_test.TestContext.RegisterHost(boundInfraEnv).Host + host := utils_test.TestContext.GetHostV2(boundInfraEnv, *h1.ID) Expect(host.ClusterID).NotTo(BeNil()) - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") - generateEssentialHostSteps(ctx, h1, "h1", ips[0]) + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "h1", ips[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - waitForHostStateV2(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, 
models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h1) By("cluster is ready") - generateEssentialPrepareForInstallationSteps(ctx, h1) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("unbind host should fail since infraEnv is bound to cluster") - _, err := userBMClient.Installer.UnbindHost(ctx, &installer.UnbindHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.UnbindHost(ctx, &installer.UnbindHostParams{ HostID: *h1.ID, InfraEnvID: boundInfraEnv, }) @@ -164,46 +165,46 @@ var _ = Describe("[V2ClusterTests]", func() { It("Hosts unbinding on cluster delete", func() { //register node to the InfraEnv and get its inventory By("register h1 with InfraEnv") - h1 = ®isterHost(infraEnvID).Host - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, h1) + h1 = &utils_test.TestContext.RegisterHost(infraEnvID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, h1) //bind the 3rd node and re-register it By("bind h1 to cluster") - bindHost(infraEnvID, *h1.ID, clusterID) - waitForHostStateV2(ctx, models.HostStatusBinding, defaultWaitForHostStateTimeout, h1) + utils_test.TestContext.BindHost(infraEnvID, *h1.ID, clusterID) + waitForHostStateV2(ctx, models.HostStatusBinding, utils_test.DefaultWaitForHostStateTimeout, h1) By("register h1 again and define the connectivity to the other hosts") - h1 = ®isterHostByUUID(h1.InfraEnvID, *h1.ID).Host + h1 = &utils_test.TestContext.RegisterHostByUUID(h1.InfraEnvID, *h1.ID).Host - 
generateEssentialHostSteps(ctx, h1, "h1", ips[0]) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "h1", ips[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - waitForHostStateV2(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h1) By("cluster is ready") - generateEssentialPrepareForInstallationSteps(ctx, h1) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("Delete Cluster") - _, err := userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) By("Wait for unbinding") - waitForHostStateV2(ctx, models.HostStatusUnbinding, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, models.HostStatusUnbinding, utils_test.DefaultWaitForHostStateTimeout, h1) By("Host is unbound") - h1 = getHostV2(infraEnvID, *h1.ID) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(h1.ClusterID).To(BeNil()) By("Other hosts are deleted") - _, err = userBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ InfraEnvID: boundInfraEnv, HostID: *h2.ID, }) Expect(err).To(HaveOccurred()) - _, err = userBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ InfraEnvID: boundInfraEnv, HostID: 
*h3.ID, }) @@ -213,75 +214,75 @@ var _ = Describe("[V2ClusterTests]", func() { It("Host unbinding pending user action on cluster delete", func() { //register node to the InfraEnv and get its inventory By("register h1 with InfraEnv") - h1 = ®isterHost(infraEnvID).Host - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, h1) + h1 = &utils_test.TestContext.RegisterHost(infraEnvID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, h1) //bind the 3rd node and re-register it By("bind h1 to cluster") - bindHost(infraEnvID, *h1.ID, clusterID) - waitForHostStateV2(ctx, models.HostStatusBinding, defaultWaitForHostStateTimeout, h1) + utils_test.TestContext.BindHost(infraEnvID, *h1.ID, clusterID) + waitForHostStateV2(ctx, models.HostStatusBinding, utils_test.DefaultWaitForHostStateTimeout, h1) By("register h1 again and define the connectivity to the other hosts") - h1 = ®isterHostByUUID(h1.InfraEnvID, *h1.ID).Host + h1 = &utils_test.TestContext.RegisterHostByUUID(h1.InfraEnvID, *h1.ID).Host - generateEssentialHostSteps(ctx, h1, "h1", ips[0]) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "h1", ips[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - waitForHostStateV2(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h1) By("cluster is ready") - generateEssentialPrepareForInstallationSteps(ctx, h1) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, 
IgnoreStateInfo) By("Start installation") - _, err := userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - generateEssentialPrepareForInstallationSteps(ctx, h1, h2, h3) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1, h2, h3) waitForClusterState(context.Background(), clusterID, models.ClusterStatusInstalling, 3*time.Minute, IgnoreStateInfo) By("Cancel installation") - _, err = userBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2CancelInstallation(ctx, &installer.V2CancelInstallationParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) waitForClusterState(context.Background(), clusterID, models.ClusterStatusCancelled, 3*time.Minute, IgnoreStateInfo) By("Delete Cluster") - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) By("Wait for unbinding Pending User Action") - waitForHostStateV2(ctx, models.HostStatusUnbindingPendingUserAction, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, models.HostStatusUnbindingPendingUserAction, utils_test.DefaultWaitForHostStateTimeout, h1) By("Host is unbound") - h1 = getHostV2(infraEnvID, *h1.ID) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(h1.ClusterID).To(BeNil()) By("Other hosts are deleted") - _, err = userBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(context.Background(), 
&installer.V2GetHostParams{ InfraEnvID: boundInfraEnv, HostID: *h2.ID, }) Expect(err).To(HaveOccurred()) - _, err = userBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ InfraEnvID: boundInfraEnv, HostID: *h3.ID, }) Expect(err).To(HaveOccurred()) By("register h1 again") - h1 = ®isterHostByUUID(h1.InfraEnvID, *h1.ID).Host - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, h1) + h1 = &utils_test.TestContext.RegisterHostByUUID(h1.InfraEnvID, *h1.ID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, h1) }) It("Cluster validations are run after host update", func() { By("register 3 nodes and check that the cluster is ready") - h1 = registerNode(ctx, boundInfraEnv, "h1", ips[0]) + h1 = utils_test.TestContext.RegisterNode(ctx, boundInfraEnv, "h1", ips[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("update the host's role to worker and check validation") @@ -293,13 +294,13 @@ var _ = Describe("[V2ClusterTests]", func() { }, } h1 = updateHostV2(ctx, hostReq) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(*c.Status).To(Equal(models.ClusterStatusInsufficient)) }) It("Verify garbage collector inactive cluster and infraenv deregistration", func() { By("Update cluster's updated_at attribute to become eligible for deregistration due to inactivity") - cluster := getCluster(clusterID) + cluster := 
utils_test.TestContext.GetCluster(clusterID) db.Model(&cluster).UpdateColumn("updated_at", time.Now().AddDate(-1, 0, 0)) By("Fetch cluster to make sure it was permanently removed by the garbage collector") @@ -320,12 +321,12 @@ var _ = Describe("[V2ClusterTests]", func() { }, "1m", "10s").Should(HaveOccurred()) By("Verify that hosts are deleted") - _, err = userBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ InfraEnvID: boundInfraEnv, HostID: *h2.ID, }) Expect(err).To(HaveOccurred()) - _, err = userBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ InfraEnvID: boundInfraEnv, HostID: *h3.ID, }) @@ -355,24 +356,24 @@ var _ = Describe("[V2ClusterTests]", func() { InstallConfigParams: override, } - _, err := userBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) Expect(err).NotTo(BeNil()) By("verifying that the cluster install config override was not set") - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(c.InstallConfigOverrides).To(BeEmpty()) By("verifying the feature usage for install config overrides was not set") - verifyUsageNotSet(c.FeatureUsage, "Install Config Overrides") + utils_test.VerifyUsageNotSet(c.FeatureUsage, "Install Config Overrides") By("succeeding when provided a valid override") override = `{"controlPlane": {"hyperthreading": "Disabled"}}` params.InstallConfigParams = override - _, err = userBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) Expect(err).To(BeNil()) By("verify that the cluster install config override is 
correctly updated") - c = getCluster(clusterID) + c = utils_test.TestContext.GetCluster(clusterID) Expect(c.InstallConfigOverrides).To(Equal(params.InstallConfigParams)) By("verifying the feature usage for install config overrides was set") @@ -382,21 +383,21 @@ var _ = Describe("[V2ClusterTests]", func() { Name: usage.InstallConfigOverrides, Data: overrideUsageProps, } - verifyUsageSet(c.FeatureUsage, overrideUsage) + utils_test.VerifyUsageSet(c.FeatureUsage, overrideUsage) By("failing when provided an invalid override") originalOverride := override override = `{"foo": "bar", "controlPlane": {"hyperthreading": "Enabled"}}` params.InstallConfigParams = override - _, err = userBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateClusterInstallConfig(ctx, ¶ms) Expect(err).ToNot(BeNil()) By("verify that the cluster install config override did not get updated") - c = getCluster(clusterID) + c = utils_test.TestContext.GetCluster(clusterID) Expect(c.InstallConfigOverrides).To(Equal(originalOverride)) By("verifying the feature usage for install config overrides was not changed") - verifyUsageSet(c.FeatureUsage, overrideUsage) + utils_test.VerifyUsageSet(c.FeatureUsage, overrideUsage) }) }) @@ -415,14 +416,14 @@ var _ = Describe("[V2ClusterTests] multiarch", func() { BeforeEach(func() { // (MGMT-11859) "user2" has permissions to use multiarch, "user" does not - tmpBMClient = userBMClient - userBMClient = user2BMClient - tmpAgentBMClient = agentBMClient - agentBMClient = agent2BMClient + tmpBMClient = utils_test.TestContext.UserBMClient + utils_test.TestContext.UserBMClient = utils_test.TestContext.User2BMClient + tmpAgentBMClient = utils_test.TestContext.AgentBMClient + utils_test.TestContext.AgentBMClient = utils_test.TestContext.Agent2BMClient tmpPullSecret = pullSecret pullSecret = fmt.Sprintf(psTemplate, FakePS2) - clusterReq, err := userBMClient.Installer.V2RegisterCluster(ctx, 
&installer.V2RegisterClusterParams{ + clusterReq, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(multiarchOpenshiftVersion), @@ -452,71 +453,71 @@ var _ = Describe("[V2ClusterTests] multiarch", func() { By("register h2 h3 to cluster via the bound arm64 infraenv") ips = hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) - h2 = registerNode(ctx, ARMinfraEnvID, "h2", ips[1]) - h3 = registerNode(ctx, ARMinfraEnvID, "h3", ips[2]) - v2UpdateVipParams(ctx, clusterID) - waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, + h2 = utils_test.TestContext.RegisterNode(ctx, ARMinfraEnvID, "h2", ips[1]) + h3 = utils_test.TestContext.RegisterNode(ctx, ARMinfraEnvID, "h3", ips[2]) + utils_test.TestContext.V2UpdateVipParams(ctx, clusterID) + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) AfterEach(func() { // (MGMT-11859) Reverting the switch from "user" to "user2" that is needed only to test // access to multiarch release images in environments with org-based control - userBMClient = tmpBMClient + utils_test.TestContext.UserBMClient = tmpBMClient pullSecret = tmpPullSecret - agentBMClient = tmpAgentBMClient + utils_test.TestContext.AgentBMClient = tmpAgentBMClient }) It("Bind single host to x86 unbound infraenv", func() { By("register h1 with the unbound infraenv") - h1 = ®isterHost(X86infraEnvID).Host - host := getHostV2(X86infraEnvID, *h1.ID) + h1 = &utils_test.TestContext.RegisterHost(X86infraEnvID).Host + host := utils_test.TestContext.GetHostV2(X86infraEnvID, *h1.ID) Expect(host.ClusterID).To(BeNil()) - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, h1) + 
utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, h1) By("bind h1 to cluster") - bindHost(X86infraEnvID, *h1.ID, clusterID) - waitForHostStateV2(ctx, models.HostStatusBinding, defaultWaitForHostStateTimeout, h1) + utils_test.TestContext.BindHost(X86infraEnvID, *h1.ID, clusterID) + waitForHostStateV2(ctx, models.HostStatusBinding, utils_test.DefaultWaitForHostStateTimeout, h1) By("register h1 again and define the connectivity to the other hosts") - h1 = ®isterHostByUUID(h1.InfraEnvID, *h1.ID).Host + h1 = &utils_test.TestContext.RegisterHostByUUID(h1.InfraEnvID, *h1.ID).Host - generateEssentialHostSteps(ctx, h1, "h1", ips[0]) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "h1", ips[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - waitForHostStateV2(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h1) By("cluster is ready") - generateEssentialPrepareForInstallationSteps(ctx, h1) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) It("Bind single host to arm64 bound infraenv", func() { By("register h1 with the bound infraenv") - h1 = ®isterHost(ARMinfraEnvID).Host - host := getHostV2(ARMinfraEnvID, *h1.ID) + h1 = &utils_test.TestContext.RegisterHost(ARMinfraEnvID).Host + host := utils_test.TestContext.GetHostV2(ARMinfraEnvID, *h1.ID) Expect(host.ClusterID).NotTo(BeNil()) - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") - generateEssentialHostSteps(ctx, h1, "h1", ips[0]) + 
utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "h1", ips[0]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - waitForHostStateV2(ctx, models.HostStatusKnown, defaultWaitForHostStateTimeout, h1) + waitForHostStateV2(ctx, models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h1) By("cluster is ready") - generateEssentialPrepareForInstallationSteps(ctx, h1) - waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h1) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) }) It("Fail to register an infraenv with a non-supported CPUArchitecture ", func() { - _, err := userBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: multiarchOpenshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, ClusterID: &clusterID, CPUArchitecture: "fake-chocobomb-architecture", @@ -537,7 +538,7 @@ var _ = Describe("[V2ClusterTests] multiarch", func() { var clusterID strfmt.UUID registerClusterForMultiArch := func() *models.Cluster { - clusterReq, err := user2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + clusterReq, err := utils_test.TestContext.User2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: 
swag.String("4.12-multi"), @@ -554,10 +555,10 @@ var _ = Describe("[V2ClusterTests] multiarch", func() { } BeforeEach(func() { // (MGMT-11859) "user2" has permissions to use multiarch, "user" does not - tmpBMClient = userBMClient - userBMClient = user2BMClient - tmpAgentBMClient = agentBMClient - agentBMClient = agent2BMClient + tmpBMClient = utils_test.TestContext.UserBMClient + utils_test.TestContext.UserBMClient = utils_test.TestContext.User2BMClient + tmpAgentBMClient = utils_test.TestContext.AgentBMClient + utils_test.TestContext.AgentBMClient = utils_test.TestContext.Agent2BMClient tmpPullSecret = pullSecret pullSecret = fmt.Sprintf(psTemplate, FakePS2) @@ -566,9 +567,9 @@ var _ = Describe("[V2ClusterTests] multiarch", func() { AfterEach(func() { // (MGMT-11859) Reverting the switch from "user" to "user2" that is needed only to test // access to multiarch release images in environments with org-based control - userBMClient = tmpBMClient + utils_test.TestContext.UserBMClient = tmpBMClient pullSecret = tmpPullSecret - agentBMClient = tmpAgentBMClient + utils_test.TestContext.AgentBMClient = tmpAgentBMClient }) It("Default image type on s390x", func() { @@ -576,12 +577,12 @@ var _ = Describe("[V2ClusterTests] multiarch", func() { infraEnv := registerInfraEnvSpecificVersionAndArch(cluster.ID, "", common.S390xCPUArchitecture, "") Expect(*infraEnv.Type).To(Equal(models.ImageTypeFullIso)) - _, err := userBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: "4.12", PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeMinimalIso, ClusterID: &clusterID, CPUArchitecture: 
common.S390xCPUArchitecture, diff --git a/subsystem/day2_cluster_test.go b/subsystem/day2_cluster_test.go index e9c3cedce38..6251493777a 100644 --- a/subsystem/day2_cluster_test.go +++ b/subsystem/day2_cluster_test.go @@ -16,6 +16,7 @@ import ( hostpkg "github.com/openshift/assisted-service/internal/host" "github.com/openshift/assisted-service/internal/host/hostutil" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) const ( @@ -32,7 +33,7 @@ var _ = Describe("Day2 v2 cluster tests", func() { BeforeEach(func() { openshiftClusterID := strfmt.UUID(uuid.New().String()) - cluster, err = userBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ NewImportClusterParams: &models.ImportClusterParams{ Name: swag.String("test-cluster"), OpenshiftVersion: openshiftVersion, @@ -49,7 +50,7 @@ var _ = Describe("Day2 v2 cluster tests", func() { Expect(swag.StringValue(&cluster.GetPayload().OcpReleaseImage)).Should(BeEmpty()) Expect(cluster.GetPayload().StatusUpdatedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ PullSecret: swag.String(pullSecret), }, @@ -64,28 +65,28 @@ var _ = Describe("Day2 v2 cluster tests", func() { }) It("cluster CRUD", func() { - _ = ®isterHost(*infraEnvID).Host + _ = &utils_test.TestContext.RegisterHost(*infraEnvID).Host Expect(err).NotTo(HaveOccurred()) - getReply, err1 := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getReply, err1 := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) 
Expect(err1).NotTo(HaveOccurred()) Expect(getReply.GetPayload().Hosts[0].ClusterID.String()).Should(Equal(clusterID.String())) - getReply, err = agentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + getReply, err = utils_test.TestContext.AgentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) Expect(getReply.GetPayload().Hosts[0].ClusterID.String()).Should(Equal(clusterID.String())) - list, err2 := userBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) + list, err2 := utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) Expect(err2).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(1)) - _, err = userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - list, err = userBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) + list, err = utils_test.TestContext.UserBMClient.Installer.V2ListClusters(ctx, &installer.V2ListClustersParams{}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(0)) - _, err = userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) Expect(err).Should(HaveOccurred()) }) }) @@ -99,7 +100,7 @@ var _ = Describe("Day2 cluster tests", func() { BeforeEach(func() { openshiftClusterID := strfmt.UUID(uuid.New().String()) - cluster, err = userBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2ImportCluster(ctx, 
&installer.V2ImportClusterParams{ NewImportClusterParams: &models.ImportClusterParams{ Name: swag.String("test-cluster"), APIVipDnsname: swag.String("api.test-cluster.example.com"), @@ -116,7 +117,7 @@ var _ = Describe("Day2 cluster tests", func() { Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(statusInfoAddingHosts)) Expect(cluster.GetPayload().StatusUpdatedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ PullSecret: swag.String(pullSecret), }, @@ -124,12 +125,12 @@ var _ = Describe("Day2 cluster tests", func() { }) Expect(err).NotTo(HaveOccurred()) - res, err1 := userBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ + res, err1 := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, ClusterID: cluster.GetPayload().ID, }, @@ -144,10 +145,10 @@ var _ = Describe("Day2 cluster tests", func() { }) It("cluster update hostname", func() { - host1 := ®isterHost(infraEnvID).Host - host2 := ®isterHost(infraEnvID).Host + host1 := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host2 := &utils_test.TestContext.RegisterHost(infraEnvID).Host - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostID: *host1.ID, InfraEnvID: infraEnvID, HostUpdateParams: &models.HostUpdateParams{ @@ -155,7 
+156,7 @@ var _ = Describe("Day2 cluster tests", func() { }, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostID: *host2.ID, InfraEnvID: infraEnvID, HostUpdateParams: &models.HostUpdateParams{ @@ -164,17 +165,17 @@ var _ = Describe("Day2 cluster tests", func() { }) Expect(err).NotTo(HaveOccurred()) - h := getHostV2(infraEnvID, *host1.ID) + h := utils_test.TestContext.GetHostV2(infraEnvID, *host1.ID) Expect(h.RequestedHostname).Should(Equal("host1newname")) - h = getHostV2(infraEnvID, *host2.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host2.ID) Expect(h.RequestedHostname).Should(Equal("host2newname")) }) It("cluster update machineConfigPool", func() { - host1 := ®isterHost(infraEnvID).Host - host2 := ®isterHost(infraEnvID).Host + host1 := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host2 := &utils_test.TestContext.RegisterHost(infraEnvID).Host - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostID: *host1.ID, InfraEnvID: infraEnvID, HostUpdateParams: &models.HostUpdateParams{ @@ -182,7 +183,7 @@ var _ = Describe("Day2 cluster tests", func() { }, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostID: *host2.ID, InfraEnvID: infraEnvID, HostUpdateParams: &models.HostUpdateParams{ @@ -191,302 +192,302 @@ var _ = Describe("Day2 cluster tests", func() { }) Expect(err).NotTo(HaveOccurred()) - h := getHostV2(infraEnvID, *host1.ID) + h := utils_test.TestContext.GetHostV2(infraEnvID, *host1.ID) Expect(h.MachineConfigPoolName).Should(Equal("host1newpool")) - h = 
getHostV2(infraEnvID, *host2.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host2.ID) Expect(h.MachineConfigPoolName).Should(Equal("host2newpool")) }) It("check host states - one node", func() { - host := ®isterHost(infraEnvID).Host - h := getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) By("checking discovery state") Expect(*h.Status).Should(Equal("discovering")) - steps := getNextSteps(infraEnvID, *host.ID) - areStepsInList(steps, []models.StepType{models.StepTypeInventory}) + steps := utils_test.TestContext.GetNextSteps(infraEnvID, *host.ID) + utils_test.AreStepsInList(steps, []models.StepType{models.StepTypeInventory}) By("checking insufficient state state - one host, no connectivity check") ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) - generateEssentialHostSteps(ctx, h, "h1host", ips[0]) - generateDomainResolution(ctx, h, "test-cluster", "") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "h1host", ips[0]) + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h) - steps = getNextSteps(infraEnvID, *host.ID) - areStepsInList(steps, []models.StepType{models.StepTypeInventory, models.StepTypeAPIVipConnectivityCheck}) + steps = utils_test.TestContext.GetNextSteps(infraEnvID, *host.ID) + utils_test.AreStepsInList(steps, []models.StepType{models.StepTypeInventory, models.StepTypeAPIVipConnectivityCheck}) By("checking known state state - one host, ignition will come from host") - generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, cluster.Payload, true) waitForHostStateV2(ctx, "known", 
60*time.Second, h) }) It("check host states - two nodes", func() { - host := ®isterHost(infraEnvID).Host - h1 := getHostV2(infraEnvID, *host.ID) - host = ®isterHost(infraEnvID).Host - h2 := getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h1 := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHost(infraEnvID).Host + h2 := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) By("checking discovery state") Expect(*h1.Status).Should(Equal("discovering")) - steps := getNextSteps(infraEnvID, *h1.ID) - areStepsInList(steps, []models.StepType{models.StepTypeInventory}) + steps := utils_test.TestContext.GetNextSteps(infraEnvID, *h1.ID) + utils_test.AreStepsInList(steps, []models.StepType{models.StepTypeInventory}) By("checking discovery state host2") Expect(*h2.Status).Should(Equal("discovering")) - steps = getNextSteps(infraEnvID, *h2.ID) - areStepsInList(steps, []models.StepType{models.StepTypeInventory}) + steps = utils_test.TestContext.GetNextSteps(infraEnvID, *h2.ID) + utils_test.AreStepsInList(steps, []models.StepType{models.StepTypeInventory}) By("checking insufficient state state host2 ") - generateEssentialHostSteps(ctx, h2, "h2host", ips[1]) - generateDomainResolution(ctx, h2, "test-cluster", "") - generateConnectivityCheckPostStepReply(ctx, h2, ips[0], true) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h2, "h2host", ips[1]) + utils_test.TestContext.GenerateDomainResolution(ctx, h2, "test-cluster", "") + utils_test.TestContext.GenerateConnectivityCheckPostStepReply(ctx, h2, ips[0], true) waitForHostStateV2(ctx, "insufficient", 60*time.Second, h2) - steps = getNextSteps(infraEnvID, *h2.ID) - areStepsInList(steps, []models.StepType{models.StepTypeInventory, models.StepTypeAPIVipConnectivityCheck}) + steps = utils_test.TestContext.GetNextSteps(infraEnvID, *h2.ID) + utils_test.AreStepsInList(steps, 
[]models.StepType{models.StepTypeInventory, models.StepTypeAPIVipConnectivityCheck}) By("checking insufficient state state") - generateEssentialHostSteps(ctx, h1, "h1host", ips[0]) - generateConnectivityCheckPostStepReply(ctx, h1, ips[1], true) - generateDomainResolution(ctx, h1, "test-cluster", "") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "h1host", ips[0]) + utils_test.TestContext.GenerateConnectivityCheckPostStepReply(ctx, h1, ips[1], true) + utils_test.TestContext.GenerateDomainResolution(ctx, h1, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h1) - steps = getNextSteps(infraEnvID, *h1.ID) - areStepsInList(steps, []models.StepType{models.StepTypeInventory, models.StepTypeAPIVipConnectivityCheck, models.StepTypeConnectivityCheck}) + steps = utils_test.TestContext.GetNextSteps(infraEnvID, *h1.ID) + utils_test.AreStepsInList(steps, []models.StepType{models.StepTypeInventory, models.StepTypeAPIVipConnectivityCheck, models.StepTypeConnectivityCheck}) By("checking known state state") - generateDomainNameResolutionReply(ctx, h1, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h1, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h1, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h1, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h1) }) It("check installation - one node", func() { - host := ®isterHost(infraEnvID).Host - h := getHostV2(infraEnvID, *host.ID) - generateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) - generateDomainResolution(ctx, h, "test-cluster", "") + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "") 
waitForHostStateV2(ctx, "insufficient", 60*time.Second, h) - generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing")) Expect(h.Role).Should(Equal(models.HostRoleWorker)) - updateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing-in-progress")) - updateProgress(*h.ID, infraEnvID, models.HostStageRebooting) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageRebooting) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("added-to-existing-cluster")) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(*c.Status).Should(Equal("adding-hosts")) }) It("check installation - 2 nodes", func() { - host := ®isterHost(infraEnvID).Host - h1 := getHostV2(infraEnvID, *host.ID) - host = ®isterHost(infraEnvID).Host - h2 := getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h1 := 
utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHost(infraEnvID).Host + h2 := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) - generateEssentialHostSteps(ctx, h1, "hostname1", ips[0]) - generateDomainResolution(ctx, h1, "test-cluster", "") - generateConnectivityCheckPostStepReply(ctx, h1, ips[1], true) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "hostname1", ips[0]) + utils_test.TestContext.GenerateDomainResolution(ctx, h1, "test-cluster", "") + utils_test.TestContext.GenerateConnectivityCheckPostStepReply(ctx, h1, ips[1], true) waitForHostStateV2(ctx, "insufficient", 60*time.Second, h1) - generateDomainNameResolutionReply(ctx, h1, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h1, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h1, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h1, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h1) - generateEssentialHostSteps(ctx, h2, "hostname2", ips[1]) - generateDomainResolution(ctx, h2, "test-cluster", "") - generateConnectivityCheckPostStepReply(ctx, h2, ips[0], true) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h2, "hostname2", ips[1]) + utils_test.TestContext.GenerateDomainResolution(ctx, h2, "test-cluster", "") + utils_test.TestContext.GenerateConnectivityCheckPostStepReply(ctx, h2, ips[0], true) waitForHostStateV2(ctx, "insufficient", 60*time.Second, h2) - generateDomainNameResolutionReply(ctx, h2, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h2, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h2, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h2, cluster.Payload, true) waitForHostStateV2(ctx, "known", 
60*time.Second, h2) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h1.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h1.ID}) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h2.ID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h2.ID}) Expect(err).NotTo(HaveOccurred()) - h1 = getHostV2(infraEnvID, *h1.ID) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(*h1.Status).Should(Equal("installing")) Expect(h1.Role).Should(Equal(models.HostRoleWorker)) - h2 = getHostV2(infraEnvID, *h2.ID) + h2 = utils_test.TestContext.GetHostV2(infraEnvID, *h2.ID) Expect(*h2.Status).Should(Equal("installing")) Expect(h2.Role).Should(Equal(models.HostRoleWorker)) - updateProgress(*h1.ID, infraEnvID, models.HostStageStartingInstallation) - h1 = getHostV2(infraEnvID, *h1.ID) + utils_test.TestContext.UpdateProgress(*h1.ID, infraEnvID, models.HostStageStartingInstallation) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(*h1.Status).Should(Equal("installing-in-progress")) - updateProgress(*h2.ID, infraEnvID, models.HostStageStartingInstallation) - h2 = getHostV2(infraEnvID, *h2.ID) + utils_test.TestContext.UpdateProgress(*h2.ID, infraEnvID, models.HostStageStartingInstallation) + h2 = utils_test.TestContext.GetHostV2(infraEnvID, *h2.ID) Expect(*h2.Status).Should(Equal("installing-in-progress")) - updateProgress(*h1.ID, infraEnvID, models.HostStageRebooting) - h1 = getHostV2(infraEnvID, *h1.ID) + utils_test.TestContext.UpdateProgress(*h1.ID, infraEnvID, models.HostStageRebooting) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(*h1.Status).Should(Equal("added-to-existing-cluster")) - 
updateProgress(*h2.ID, infraEnvID, models.HostStageRebooting) - h2 = getHostV2(infraEnvID, *h2.ID) + utils_test.TestContext.UpdateProgress(*h2.ID, infraEnvID, models.HostStageRebooting) + h2 = utils_test.TestContext.GetHostV2(infraEnvID, *h2.ID) Expect(*h2.Status).Should(Equal("added-to-existing-cluster")) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(*c.Status).Should(Equal("adding-hosts")) }) It("check installation - 0 nodes", func() { - host := ®isterHost(infraEnvID).Host - h1 := getHostV2(infraEnvID, *host.ID) - host = ®isterHost(infraEnvID).Host - h2 := getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h1 := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHost(infraEnvID).Host + h2 := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) - generateEssentialHostSteps(ctx, h1, "hostname1", ips[0]) - generateDomainResolution(ctx, h1, "test-cluster", "") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h1, "hostname1", ips[0]) + utils_test.TestContext.GenerateDomainResolution(ctx, h1, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h1) - generateEssentialHostSteps(ctx, h2, "hostname2", ips[1]) - generateDomainResolution(ctx, h2, "test-cluster", "") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h2, "hostname2", ips[1]) + utils_test.TestContext.GenerateDomainResolution(ctx, h2, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h2) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h1.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h1.ID}) Expect(err).To(HaveOccurred()) - _, err = userBMClient.Installer.V2InstallHost(ctx, 
&installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h2.ID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h2.ID}) Expect(err).To(HaveOccurred()) - h1 = getHostV2(infraEnvID, *h1.ID) + h1 = utils_test.TestContext.GetHostV2(infraEnvID, *h1.ID) Expect(*h1.Status).Should(Equal("insufficient")) Expect(h1.Role).Should(Equal(models.HostRoleWorker)) - h2 = getHostV2(infraEnvID, *h2.ID) + h2 = utils_test.TestContext.GetHostV2(infraEnvID, *h2.ID) Expect(*h2.Status).Should(Equal("insufficient")) Expect(h2.Role).Should(Equal(models.HostRoleWorker)) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(*c.Status).Should(Equal("adding-hosts")) }) It("check installation - install specific node", func() { - host := ®isterHost(infraEnvID).Host - h := getHostV2(infraEnvID, *host.ID) - generateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) - generateDomainResolution(ctx, h, "test-cluster", "") + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h) - generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, 
&installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing")) Expect(h.Role).Should(Equal(models.HostRoleWorker)) - updateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing-in-progress")) - updateProgress(*h.ID, infraEnvID, models.HostStageRebooting) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageRebooting) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("added-to-existing-cluster")) }) It("check installation - node registers after reboot", func() { - host := ®isterHost(infraEnvID).Host - h := getHostV2(infraEnvID, *host.ID) - generateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) - generateDomainResolution(ctx, h, "test-cluster", "") + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h) - generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h) - _, err := userBMClient.Installer.V2InstallHost(ctx, 
&installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing")) Expect(h.Role).Should(Equal(models.HostRoleWorker)) - steps := getNextSteps(infraEnvID, *h.ID) - areStepsInList(steps, []models.StepType{models.StepTypeInstall}) - updateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) - h = getHostV2(infraEnvID, *host.ID) + steps := utils_test.TestContext.GetNextSteps(infraEnvID, *h.ID) + utils_test.AreStepsInList(steps, []models.StepType{models.StepTypeInstall}) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing-in-progress")) - updateProgress(*h.ID, infraEnvID, models.HostStageRebooting) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageRebooting) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("added-to-existing-cluster")) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) Expect(*c.Status).Should(Equal("adding-hosts")) - _ = registerHostByUUID(infraEnvID, *h.ID) - h = getHostV2(infraEnvID, *host.ID) + _ = utils_test.TestContext.RegisterHostByUUID(infraEnvID, *h.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing-pending-user-action")) }) It("reset node after failed installation", func() { - host := ®isterHost(infraEnvID).Host - h := getHostV2(infraEnvID, *host.ID) - generateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) - generateDomainResolution(ctx, h, "test-cluster", "") + 
host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h) - generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing")) Expect(h.Role).Should(Equal(models.HostRoleWorker)) - updateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing-in-progress")) - updateProgress(*h.ID, infraEnvID, models.HostStageRebooting) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageRebooting) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("added-to-existing-cluster")) - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) 
Expect(*c.Status).Should(Equal("adding-hosts")) - _, err = userBMClient.Installer.V2ResetHost(ctx, &installer.V2ResetHostParams{InfraEnvID: infraEnvID, HostID: *host.ID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetHost(ctx, &installer.V2ResetHostParams{InfraEnvID: infraEnvID, HostID: *host.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("resetting-pending-user-action")) - host = ®isterHostByUUID(infraEnvID, *host.ID).Host - h = getHostV2(infraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHostByUUID(infraEnvID, *host.ID).Host + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("discovering")) }) It("reset node during failed installation", func() { - host := ®isterHost(infraEnvID).Host - h := getHostV2(infraEnvID, *host.ID) - generateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) - generateDomainResolution(ctx, h, "test-cluster", "") + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h) - generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, 
&installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing")) Expect(h.Role).Should(Equal(models.HostRoleWorker)) - updateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) - h = getHostV2(infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*h.ID, infraEnvID, models.HostStageStartingInstallation) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing-in-progress")) - _, err = userBMClient.Installer.V2ResetHost(ctx, &installer.V2ResetHostParams{InfraEnvID: infraEnvID, HostID: *host.ID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetHost(ctx, &installer.V2ResetHostParams{InfraEnvID: infraEnvID, HostID: *host.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("resetting-pending-user-action")) - host = ®isterHostByUUID(infraEnvID, *host.ID).Host - h = getHostV2(infraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHostByUUID(infraEnvID, *host.ID).Host + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("discovering")) }) It("reset node failed install command", func() { - host := ®isterHost(infraEnvID).Host - h := getHostV2(infraEnvID, *host.ID) - generateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) - generateDomainResolution(ctx, h, "test-cluster", "") + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "hostname", defaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "") waitForHostStateV2(ctx, "insufficient", 60*time.Second, h) - 
generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h, cluster.Payload, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, cluster.Payload, true) waitForHostStateV2(ctx, "known", 60*time.Second, h) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *h.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("installing")) Expect(h.Role).Should(Equal(models.HostRoleWorker)) // post failure to execute the install command - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: infraEnvID, HostID: *host.ID, Reply: &models.StepReply{ @@ -498,14 +499,14 @@ var _ = Describe("Day2 cluster tests", func() { }, }) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("error")) - _, err = userBMClient.Installer.V2ResetHost(ctx, &installer.V2ResetHostParams{InfraEnvID: infraEnvID, HostID: *host.ID}) + _, err = utils_test.TestContext.UserBMClient.Installer.V2ResetHost(ctx, &installer.V2ResetHostParams{InfraEnvID: infraEnvID, HostID: *host.ID}) Expect(err).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID, *host.ID) + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("resetting-pending-user-action")) - host = ®isterHostByUUID(infraEnvID, *host.ID).Host - h = 
getHostV2(infraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHostByUUID(infraEnvID, *host.ID).Host + h = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*h.Status).Should(Equal("discovering")) }) }) @@ -519,7 +520,7 @@ var _ = Describe("Day2 cluster with bind/unbind hosts", func() { BeforeEach(func() { openshiftClusterID := strfmt.UUID(uuid.New().String()) - cluster, err = userBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ NewImportClusterParams: &models.ImportClusterParams{ Name: swag.String("test-cluster"), APIVipDnsname: swag.String("api.test-cluster.example.com"), @@ -529,7 +530,7 @@ var _ = Describe("Day2 cluster with bind/unbind hosts", func() { Expect(err).NotTo(HaveOccurred()) clusterID = *cluster.GetPayload().ID - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ PullSecret: swag.String(pullSecret), }, @@ -537,12 +538,12 @@ var _ = Describe("Day2 cluster with bind/unbind hosts", func() { }) Expect(err).NotTo(HaveOccurred()) - infraEnv, err := userBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ + infraEnv, err := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, }, }) @@ -551,10 +552,10 @@ var _ = Describe("Day2 cluster with bind/unbind hosts", func() { }) It("check host states with binding - two nodes", func() { - 
host := ®isterHost(infraEnvID).Host - h1 := getHostV2(infraEnvID, *host.ID) - host = ®isterHost(infraEnvID).Host - h2 := getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + h1 := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHost(infraEnvID).Host + h2 := utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) By("hosts in state discovering-unbound") @@ -562,23 +563,23 @@ var _ = Describe("Day2 cluster with bind/unbind hosts", func() { Expect(*h2.Status).Should(Equal(models.HostStatusDiscoveringUnbound)) By("host h1 become known-unbound after inventory reply") - generateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()) - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()) + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") waitForHostStateV2(ctx, models.HostStatusKnownUnbound, 60*time.Second, h1) By("bind host h1 and re-register - host become insufficient") - bindHost(infraEnvID, *h1.ID, clusterID) + utils_test.TestContext.BindHost(infraEnvID, *h1.ID, clusterID) waitForHostStateV2(ctx, models.HostStatusBinding, 60*time.Second, h1) - h1 = ®isterHostByUUID(infraEnvID, *h1.ID).Host - generateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()) - generateHWPostStepReply(ctx, h1, getDefaultInventory(ips[0]), "h1") + h1 = &utils_test.TestContext.RegisterHostByUUID(infraEnvID, *h1.ID).Host + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()) + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, utils_test.GetDefaultInventory(ips[0]), "h1") waitForHostStateV2(ctx, models.HostStatusInsufficient, 60*time.Second, h1) By("add connectivity - host h1 becomes known") - generateDomainResolution(ctx, h1, "test-cluster", "") - 
generateConnectivityCheckPostStepReply(ctx, h1, ips[1], true) - generateDomainNameResolutionReply(ctx, h1, *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, h1, cluster.Payload, true) + utils_test.TestContext.GenerateDomainResolution(ctx, h1, "test-cluster", "") + utils_test.TestContext.GenerateConnectivityCheckPostStepReply(ctx, h1, ips[1], true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, h1, *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h1, cluster.Payload, true) waitForHostStateV2(ctx, models.HostStatusKnown, 60*time.Second, h1) }) }) @@ -597,7 +598,7 @@ var _ = Describe("Installation progress", func() { // register cluster openshiftClusterID := strfmt.UUID(uuid.New().String()) - importClusterReply, err := userBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ + importClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ NewImportClusterParams: &models.ImportClusterParams{ Name: swag.String("day2-cluster"), APIVipDnsname: swag.String("api.test-cluster.example.com"), @@ -608,7 +609,7 @@ var _ = Describe("Installation progress", func() { c = importClusterReply.GetPayload() c.OpenshiftVersion = openshiftVersion - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ PullSecret: swag.String(pullSecret), }, @@ -616,12 +617,12 @@ var _ = Describe("Installation progress", func() { }) Expect(err).NotTo(HaveOccurred()) - res, err1 := userBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ + res, err1 := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: 
swag.String("test-infra-env"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, ClusterID: c.ID, }, @@ -632,8 +633,8 @@ var _ = Describe("Installation progress", func() { // register host to be used by the test as day2 host // day2 host is now initialized as a worker - registerHost(infraEnvID) - c = getCluster(*c.ID) + utils_test.TestContext.RegisterHost(infraEnvID) + c = utils_test.TestContext.GetCluster(*c.ID) Expect(c.Hosts[0].ProgressStages).To(Equal(hostpkg.WorkerStages[:5])) Expect(c.Hosts[0].Progress.InstallationPercentage).To(Equal(int64(0))) @@ -642,17 +643,17 @@ var _ = Describe("Installation progress", func() { By("install hosts", func() { - generateEssentialHostSteps(ctx, c.Hosts[0], "hostname", defaultCIDRv4) - generateDomainResolution(ctx, c.Hosts[0], "day2-cluster", "") - generateApiVipPostStepReply(ctx, c.Hosts[0], nil, true) - generateDomainNameResolutionReply(ctx, c.Hosts[0], *common.TestDomainNameResolutionsSuccess) - generateApiVipPostStepReply(ctx, c.Hosts[0], c, true) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, c.Hosts[0], "hostname", defaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, c.Hosts[0], "day2-cluster", "") + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, c.Hosts[0], nil, true) + utils_test.TestContext.GenerateDomainNameResolutionReply(ctx, c.Hosts[0], *common.TestDomainNameResolutionsSuccess) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, c.Hosts[0], c, true) waitForHostStateV2(ctx, "known", 60*time.Second, c.Hosts[0]) - _, err := userBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *c.Hosts[0].ID}) + _, err := utils_test.TestContext.UserBMClient.Installer.V2InstallHost(ctx, &installer.V2InstallHostParams{InfraEnvID: infraEnvID, HostID: *c.Hosts[0].ID}) 
Expect(err).NotTo(HaveOccurred()) - c = getCluster(*c.ID) + c = utils_test.TestContext.GetCluster(*c.ID) Expect(*c.Hosts[0].Status).Should(Equal("installing")) Expect(c.Hosts[0].Role).Should(Equal(models.HostRoleWorker)) @@ -663,8 +664,8 @@ var _ = Describe("Installation progress", func() { By("report hosts' progress - 1st report", func() { - updateProgress(*c.Hosts[0].ID, infraEnvID, models.HostStageStartingInstallation) - c = getCluster(*c.ID) + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, infraEnvID, models.HostStageStartingInstallation) + c = utils_test.TestContext.GetCluster(*c.ID) Expect(*c.Hosts[0].Status).Should(Equal("installing-in-progress")) Expect(c.Hosts[0].ProgressStages).To(Equal(hostpkg.WorkerStages[:5])) @@ -674,8 +675,8 @@ var _ = Describe("Installation progress", func() { By("report hosts' progress - 2nd report", func() { - updateProgress(*c.Hosts[0].ID, infraEnvID, models.HostStageInstalling) - c = getCluster(*c.ID) + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, infraEnvID, models.HostStageInstalling) + c = utils_test.TestContext.GetCluster(*c.ID) Expect(c.Hosts[0].ProgressStages).To(Equal(hostpkg.WorkerStages[:5])) Expect(c.Hosts[0].Progress.InstallationPercentage).To(Equal(int64(40))) expectProgressToBe(c, 0, 0, 0) @@ -683,8 +684,8 @@ var _ = Describe("Installation progress", func() { By("report hosts' progress - 3rd report", func() { - updateProgress(*c.Hosts[0].ID, infraEnvID, models.HostStageWritingImageToDisk) - c = getCluster(*c.ID) + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, infraEnvID, models.HostStageWritingImageToDisk) + c = utils_test.TestContext.GetCluster(*c.ID) Expect(c.Hosts[0].ProgressStages).To(Equal(hostpkg.WorkerStages[:5])) Expect(c.Hosts[0].Progress.InstallationPercentage).To(Equal(int64(60))) expectProgressToBe(c, 0, 0, 0) @@ -692,8 +693,8 @@ var _ = Describe("Installation progress", func() { By("report hosts' progress - last report", func() { - updateProgress(*c.Hosts[0].ID, infraEnvID, 
models.HostStageRebooting) - c = getCluster(*c.ID) + utils_test.TestContext.UpdateProgress(*c.Hosts[0].ID, infraEnvID, models.HostStageRebooting) + c = utils_test.TestContext.GetCluster(*c.ID) Expect(*c.Hosts[0].Status).Should(Equal(models.HostStatusAddedToExistingCluster)) Expect(c.Hosts[0].ProgressStages).To(Equal(hostpkg.WorkerStages[:5])) Expect(c.Hosts[0].Progress.InstallationPercentage).To(Equal(int64(100))) diff --git a/subsystem/events_test.go b/subsystem/events_test.go index 38129ec1633..dbe73d8a9d8 100644 --- a/subsystem/events_test.go +++ b/subsystem/events_test.go @@ -11,12 +11,13 @@ import ( "github.com/openshift/assisted-service/client/installer" eventgen "github.com/openshift/assisted-service/internal/common/events" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("Events tests", func() { It("Match Event Name", func() { - c, err := userBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ + c, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "fake.domain", Name: swag.String("test-v2events-cluster"), @@ -28,7 +29,7 @@ var _ = Describe("Events tests", func() { Expect(err).NotTo(HaveOccurred()) clusterId := *c.GetPayload().ID - evs, err := userBMClient.Events.V2ListEvents(context.TODO(), &events.V2ListEventsParams{ + evs, err := utils_test.TestContext.UserBMClient.Events.V2ListEvents(context.TODO(), &events.V2ListEventsParams{ ClusterID: &clusterId, HostIds: nil, InfraEnvID: nil, diff --git a/subsystem/feature_support_levels_test.go b/subsystem/feature_support_levels_test.go index 36baf2da57a..b7053ddc777 100644 --- a/subsystem/feature_support_levels_test.go +++ b/subsystem/feature_support_levels_test.go @@ -12,6 +12,7 @@ import ( "github.com/openshift/assisted-service/internal/common" 
"github.com/openshift/assisted-service/internal/featuresupport" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" "github.com/thoas/go-funk" ) @@ -28,7 +29,7 @@ var _ = Describe("Feature support levels API", func() { PlatformType: platformType, CPUArchitecture: cpuArchitecture, } - supportedFeaturesOK, err := user2BMClient.Installer.GetSupportedFeatures(ctx, ¶ms) + supportedFeaturesOK, err := utils_test.TestContext.User2BMClient.Installer.GetSupportedFeatures(ctx, ¶ms) if err != nil { return nil, err } @@ -37,7 +38,7 @@ var _ = Describe("Feature support levels API", func() { } registerNewCluster := func(version, cpuArchitecture, highAvailabilityMode string, userManagedNetworking *bool) (*installer.V2RegisterClusterCreated, error) { - cluster, errRegisterCluster := user2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, errRegisterCluster := utils_test.TestContext.User2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(version), @@ -57,12 +58,12 @@ var _ = Describe("Feature support levels API", func() { } registerNewInfraEnv := func(id *strfmt.UUID, version, cpuArchitecture string) (*installer.RegisterInfraEnvCreated, error) { - infraEnv, errRegisterInfraEnv := user2BMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ + infraEnv, errRegisterInfraEnv := utils_test.TestContext.User2BMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: version, PullSecret: swag.String(fmt.Sprintf(psTemplate, FakePS2)), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, ClusterID: 
id, CPUArchitecture: cpuArchitecture, @@ -106,7 +107,7 @@ var _ = Describe("Feature support levels API", func() { Expect(err).NotTo(HaveOccurred()) Expect(cluster.Payload.CPUArchitecture).To(Equal(common.MultiCPUArchitecture)) - _, err = user2BMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.User2BMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ UserManagedNetworking: swag.Bool(false), }, @@ -125,7 +126,7 @@ var _ = Describe("Feature support levels API", func() { Expect(err).NotTo(HaveOccurred()) Expect(infraEnv.Payload.CPUArchitecture).To(Equal("s390x")) - _, err = user2BMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.User2BMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ UserManagedNetworking: swag.Bool(false), }, @@ -142,7 +143,7 @@ var _ = Describe("Feature support levels API", func() { Expect(err).NotTo(HaveOccurred()) Expect(cluster.Payload.CPUArchitecture).To(Equal(common.MultiCPUArchitecture)) - _, err = user2BMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.User2BMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: "odf"}, @@ -164,7 +165,7 @@ var _ = Describe("Feature support levels API", func() { Expect(err).ToNot(HaveOccurred()) Expect(common.ImageTypeValue(infraEnv.Payload.Type)).ToNot(Equal(models.ImageTypeMinimalIso)) - updatedInfraEnv, err := user2BMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{ + updatedInfraEnv, err := utils_test.TestContext.User2BMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{ InfraEnvID: *infraEnv.Payload.ID, InfraEnvUpdateParams: 
&models.InfraEnvUpdateParams{ ImageType: models.ImageTypeMinimalIso, @@ -180,7 +181,7 @@ var _ = Describe("Feature support levels API", func() { Expect(err).ToNot(HaveOccurred()) Expect(common.ImageTypeValue(infraEnv.Payload.Type)).ToNot(Equal(models.ImageTypeMinimalIso)) - updatedInfraEnv, err := user2BMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{ + updatedInfraEnv, err := utils_test.TestContext.User2BMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{ InfraEnvID: *infraEnv.Payload.ID, InfraEnvUpdateParams: &models.InfraEnvUpdateParams{ ImageType: models.ImageTypeMinimalIso, @@ -196,7 +197,7 @@ var _ = Describe("Feature support levels API", func() { Expect(err).ToNot(HaveOccurred()) Expect(common.ImageTypeValue(infraEnv.Payload.Type)).ToNot(Equal(models.ImageTypeMinimalIso)) - _, err = user2BMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{ + _, err = utils_test.TestContext.User2BMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{ InfraEnvID: *infraEnv.Payload.ID, InfraEnvUpdateParams: &models.InfraEnvUpdateParams{ ImageType: models.ImageTypeMinimalIso, @@ -258,7 +259,7 @@ var _ = Describe("Feature support levels API", func() { OpenshiftVersion: version, CPUArchitecture: swag.String(arch), } - response, err := userBMClient.Installer.GetSupportedFeatures(ctx, ¶ms) + response, err := utils_test.TestContext.UserBMClient.Installer.GetSupportedFeatures(ctx, ¶ms) Expect(err).ShouldNot(HaveOccurred()) for featureID, supportLevel := range response.Payload.Features { @@ -269,7 +270,7 @@ var _ = Describe("Feature support levels API", func() { }) It(fmt.Sprintf("GetSupportedFeatures with empty CPU architectrue, OCP version %s", version), func() { - response, err := userBMClient.Installer.GetSupportedFeatures(ctx, &installer.GetSupportedFeaturesParams{OpenshiftVersion: version}) + response, err := utils_test.TestContext.UserBMClient.Installer.GetSupportedFeatures(ctx, 
&installer.GetSupportedFeaturesParams{OpenshiftVersion: version}) Expect(err).ShouldNot(HaveOccurred()) for featureID, supportLevel := range response.Payload.Features { @@ -288,7 +289,7 @@ var _ = Describe("Feature support levels API", func() { version := "4.6" params.OpenshiftVersion = version - response, err := userBMClient.Installer.GetSupportedArchitectures(ctx, ¶ms) + response, err := utils_test.TestContext.UserBMClient.Installer.GetSupportedArchitectures(ctx, ¶ms) Expect(err).ShouldNot(HaveOccurred()) architecturesSupportLevel := response.Payload.Architectures @@ -304,7 +305,7 @@ var _ = Describe("Feature support levels API", func() { version := "4.12" params.OpenshiftVersion = version - response, err := userBMClient.Installer.GetSupportedArchitectures(ctx, ¶ms) + response, err := utils_test.TestContext.UserBMClient.Installer.GetSupportedArchitectures(ctx, ¶ms) Expect(err).ShouldNot(HaveOccurred()) architecturesSupportLevel := response.Payload.Architectures @@ -320,7 +321,7 @@ var _ = Describe("Feature support levels API", func() { version := "4.13" params.OpenshiftVersion = version - response, err := userBMClient.Installer.GetSupportedArchitectures(ctx, ¶ms) + response, err := utils_test.TestContext.UserBMClient.Installer.GetSupportedArchitectures(ctx, ¶ms) Expect(err).ShouldNot(HaveOccurred()) architecturesSupportLevel := response.Payload.Architectures diff --git a/subsystem/host_test.go b/subsystem/host_test.go index 43bf0b38977..8140a5e65d0 100644 --- a/subsystem/host_test.go +++ b/subsystem/host_test.go @@ -16,6 +16,7 @@ import ( serviceHost "github.com/openshift/assisted-service/internal/host" "github.com/openshift/assisted-service/models" "github.com/openshift/assisted-service/pkg/auth" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("Host tests", func() { @@ -26,7 +27,7 @@ var _ = Describe("Host tests", func() { BeforeEach(func() { var err error - cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, 
&installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), @@ -40,8 +41,8 @@ var _ = Describe("Host tests", func() { }) It("Should reject hostname if it is forbidden", func() { - host := ®isterHost(*infraEnvID).Host - host = getHostV2(*infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) hostnames := []string{ @@ -61,14 +62,14 @@ var _ = Describe("Host tests", func() { HostName: &hostnames[i], }, } - _, hostnameUpdateError := userBMClient.Installer.V2UpdateHost(ctx, hostnameChangeRequest) + _, hostnameUpdateError := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, hostnameChangeRequest) Expect(hostnameUpdateError).To(HaveOccurred()) } }) It("Should accept hostname if it is permitted", func() { - host := ®isterHost(*infraEnvID).Host - host = getHostV2(*infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) hostname := "arbitrary.hostname" @@ -79,34 +80,34 @@ var _ = Describe("Host tests", func() { HostName: &hostname, }, } - _, hostnameUpdateError := userBMClient.Installer.V2UpdateHost(ctx, hostnameChangeRequest) + _, hostnameUpdateError := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, hostnameChangeRequest) Expect(hostnameUpdateError).NotTo(HaveOccurred()) }) It("host CRUD", func() { - host := ®isterHost(*infraEnvID).Host - host = getHostV2(*infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(*host.Status).Should(Equal("discovering")) 
Expect(host.StatusUpdatedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) - list, err := userBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: *infraEnvID}) + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: *infraEnvID}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(1)) - list, err = agentBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: *infraEnvID}) + list, err = utils_test.TestContext.AgentBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: *infraEnvID}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(1)) - _, err = userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: *infraEnvID, HostID: *host.ID, }) Expect(err).NotTo(HaveOccurred()) - list, err = userBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: *infraEnvID}) + list, err = utils_test.TestContext.UserBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: *infraEnvID}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(0)) - _, err = userBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ InfraEnvID: *infraEnvID, HostID: *host.ID, }) @@ -114,8 +115,8 @@ var _ = Describe("Host tests", func() { }) It("should update host installation disk id successfully", func() { - host := ®isterHost(*infraEnvID).Host - host = getHostV2(*infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) inventory, error := common.UnmarshalInventory(defaultInventory()) 
Expect(error).ToNot(HaveOccurred()) @@ -156,7 +157,7 @@ var _ = Describe("Host tests", func() { }, } - updatedHost, updateError := userBMClient.Installer.V2UpdateHost(ctx, diskSelectionRequest) + updatedHost, updateError := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, diskSelectionRequest) Expect(updateError).NotTo(HaveOccurred()) host = updatedHost.Payload @@ -165,183 +166,183 @@ var _ = Describe("Host tests", func() { }) It("next step", func() { - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), }, }) Expect(err).ToNot(HaveOccurred()) - host := ®isterHost(*infraEnvID).Host - host2 := ®isterHost(*infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + host2 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host Expect(db.Model(host2).UpdateColumns(&models.Host{Inventory: defaultInventory(), Status: swag.String(models.HostStatusInsufficient), InstallationDiskID: "wwn-0x1111111111111111111111"}).Error).NotTo(HaveOccurred()) - steps := getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) - host = getHostV2(*infraEnvID, *host.ID) + steps := utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(db.Model(host).Update("status", "insufficient").Error).NotTo(HaveOccurred()) Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) - Expect(isStepTypeInList(steps, 
models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeFalse()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeFalse()) Expect(db.Save(&models.APIVip{IP: "1.2.3.4", ClusterID: clusterID}).Error).ToNot(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeTrue()) Expect(db.Model(host).Update("status", "known").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeTrue()) Expect(db.Model(host).Update("status", 
"disabled").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) Expect(steps.NextInstructionSeconds).Should(Equal(int64(120))) Expect(*steps.PostStepAction).Should(Equal(models.StepsPostStepActionContinue)) Expect(len(steps.Instructions)).Should(Equal(0)) Expect(db.Model(host).Update("status", "insufficient").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) Expect(db.Model(host).Update("status", "error").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeStopInstallation)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeStopInstallation)).Should(BeTrue()) Expect(db.Model(host).Update("status", models.HostStatusResetting).Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) Expect(len(steps.Instructions)).Should(Equal(0)) Expect(db.Model(cluster.GetPayload()).Update("status", models.ClusterStatusPreparingForInstallation).Error).NotTo(HaveOccurred()) Expect(db.Model(host2).Update("status", models.HostStatusPreparingForInstallation).Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host2.ID) - Expect(isStepTypeInList(steps, models.StepTypeInstallationDiskSpeedCheck)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeContainerImageAvailability)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host2.ID) + Expect(utils_test.IsStepTypeInList(steps, 
models.StepTypeInstallationDiskSpeedCheck)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeContainerImageAvailability)).Should(BeTrue()) }) It("next step - DHCP", func() { By("Creating cluster") Expect(db.Save(&models.MachineNetwork{ClusterID: clusterID, Cidr: "1.2.3.0/24"}).Error).ToNot(HaveOccurred()) By("Creating hosts") - host := ®isterHost(*infraEnvID).Host - host2 := ®isterHost(*infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + host2 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host Expect(db.Model(host2).UpdateColumns(&models.Host{Inventory: defaultInventory(), Status: swag.String(models.HostStatusInsufficient)}).Error).NotTo(HaveOccurred()) By("Get steps in discovering ...") - steps := getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) - host = getHostV2(*infraEnvID, *host.ID) + steps := utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) By("Get steps in insufficient ...") Expect(db.Model(host).Update("status", "insufficient").Error).NotTo(HaveOccurred()) Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeFalse()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) 
+ Expect(utils_test.IsStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeFalse()) Expect(db.Save(&models.APIVip{IP: "1.2.3.4", ClusterID: clusterID}).Error).ToNot(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeInventory)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeTrue()) By("Get steps in known ...") Expect(db.Model(host).Update("status", "known").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeVerifyVips)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeFreeNetworkAddresses)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, 
models.StepTypeVerifyVips)).Should(BeTrue()) By("Get steps in disabled ...") Expect(db.Model(host).Update("status", "disabled").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) Expect(steps.NextInstructionSeconds).Should(Equal(int64(120))) Expect(len(steps.Instructions)).Should(Equal(0)) By("Get steps in insufficient ...") Expect(db.Model(host).Update("status", "insufficient").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) - Expect(isStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeConnectivityCheck)).Should(BeTrue()) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) By("Get steps in error ...") Expect(db.Model(host).Update("status", "error").Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeStopInstallation)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeStopInstallation)).Should(BeTrue()) By("Get steps in resetting ...") Expect(db.Model(host).Update("status", models.HostStatusResetting).Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) Expect(len(steps.Instructions)).Should(Equal(0)) for _, st := range []string{models.HostStatusInstalling, models.HostStatusPreparingForInstallation} { By(fmt.Sprintf("Get steps in %s ...", st)) Expect(db.Model(host).Update("status", st).Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, 
models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) } By(fmt.Sprintf("Get steps in %s ...", models.HostStatusInstallingInProgress)) Expect(db.Model(host).Updates(map[string]interface{}{"status": models.HostStatusInstallingInProgress, "progress_stage_updated_at": strfmt.DateTime(time.Now())}).Error).NotTo(HaveOccurred()) - steps = getNextSteps(*infraEnvID, *host.ID) - Expect(isStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) + steps = utils_test.TestContext.GetNextSteps(*infraEnvID, *host.ID) + Expect(utils_test.IsStepTypeInList(steps, models.StepTypeDhcpLeaseAllocate)).Should(BeTrue()) }) It("host_disconnection", func() { - host := ®isterHost(*infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host Expect(db.Model(host).Update("status", "installing").Error).NotTo(HaveOccurred()) Expect(db.Model(host).Update("role", "master").Error).NotTo(HaveOccurred()) Expect(db.Model(host).Update("bootstrap", "true").Error).NotTo(HaveOccurred()) Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) Expect(db.Model(host).Update("CheckedInAt", strfmt.DateTime(time.Time{})).Error).NotTo(HaveOccurred()) - host = getHostV2(*infraEnvID, *host.ID) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) time.Sleep(time.Second * 3) - host = getHostV2(*infraEnvID, *host.ID) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(swag.StringValue(host.Status)).Should(Equal("error")) Expect(swag.StringValue(host.StatusInfo)).Should(Equal("Host failed to install due to timeout while connecting to host during the installation phase.")) }) It("host installation progress", func() { - host := ®isterHost(*infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host bootstrapStages := 
serviceHost.BootstrapStages[:] Expect(db.Model(host).Update("status", "installing").Error).NotTo(HaveOccurred()) Expect(db.Model(host).Update("role", "master").Error).NotTo(HaveOccurred()) Expect(db.Model(host).Update("bootstrap", "true").Error).NotTo(HaveOccurred()) Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) - updateProgress(*host.ID, host.InfraEnvID, models.HostStageStartingInstallation) - host = getHostV2(*infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageStartingInstallation) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.ProgressStages).Should(Equal(bootstrapStages)) Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageStartingInstallation)) time.Sleep(time.Second * 3) - updateProgress(*host.ID, host.InfraEnvID, models.HostStageInstalling) - host = getHostV2(*infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageInstalling) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.ProgressStages).Should(Equal(bootstrapStages)) Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageInstalling)) time.Sleep(time.Second * 3) - updateProgress(*host.ID, host.InfraEnvID, models.HostStageWritingImageToDisk) - host = getHostV2(*infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageWritingImageToDisk) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.ProgressStages).Should(Equal(bootstrapStages)) Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageWritingImageToDisk)) time.Sleep(time.Second * 3) - updateProgress(*host.ID, host.InfraEnvID, models.HostStageRebooting) - host = getHostV2(*infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageRebooting) + host = utils_test.TestContext.GetHostV2(*infraEnvID, 
*host.ID) Expect(host.ProgressStages).Should(Equal(bootstrapStages)) Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageRebooting)) time.Sleep(time.Second * 3) - updateProgress(*host.ID, host.InfraEnvID, models.HostStageConfiguring) - host = getHostV2(*infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageConfiguring) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.ProgressStages).Should(Equal(bootstrapStages)) Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageConfiguring)) time.Sleep(time.Second * 3) - updateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) - host = getHostV2(*infraEnvID, *host.ID) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.ProgressStages).Should(Equal(bootstrapStages)) Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageDone)) time.Sleep(time.Second * 3) }) It("installation_error_reply", func() { - host := ®isterHost(*infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host Expect(db.Model(host).Update("status", "installing").Error).NotTo(HaveOccurred()) Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) Expect(db.Model(host).Update("role", "worker").Error).NotTo(HaveOccurred()) - _, err := agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: *infraEnvID, HostID: *host.ID, Reply: &models.StepReply{ @@ -352,19 +353,19 @@ var _ = Describe("Host tests", func() { }, }) Expect(err).ShouldNot(HaveOccurred()) - host = getHostV2(*infraEnvID, *host.ID) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(swag.StringValue(host.Status)).Should(Equal("error")) 
Expect(swag.StringValue(host.StatusInfo)).Should(Equal("installation command failed")) }) It("connectivity_report_store_only_relevant_reply", func() { - host := ®isterHost(*infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(*infraEnvID).Host connectivity := "{\"remote_hosts\":[{\"host_id\":\"b8a1228d-1091-4e79-be66-738a160f9ff7\",\"l2_connectivity\":null,\"l3_connectivity\":null,\"mtu_report\":null}]}" extraConnectivity := "{\"extra\":\"data\",\"remote_hosts\":[{\"host_id\":\"b8a1228d-1091-4e79-be66-738a160f9ff7\",\"l2_connectivity\":null,\"l3_connectivity\":null}]}" - _, err := agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: *infraEnvID, HostID: *host.ID, Reply: &models.StepReply{ @@ -375,10 +376,10 @@ var _ = Describe("Host tests", func() { }, }) Expect(err).NotTo(HaveOccurred()) - host = getHostV2(*infraEnvID, *host.ID) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.Connectivity).Should(Equal(connectivity)) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: *infraEnvID, HostID: *host.ID, Reply: &models.StepReply{ @@ -389,11 +390,11 @@ var _ = Describe("Host tests", func() { }, }) Expect(err).To(HaveOccurred()) - host = getHostV2(*infraEnvID, *host.ID) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.Connectivity).Should(Equal(connectivity)) //exit code is not 0 - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: *infraEnvID, HostID: *host.ID, Reply: &models.StepReply{ @@ -404,7 +405,7 @@ var _ = Describe("Host tests", 
func() { }, }) Expect(err).NotTo(HaveOccurred()) - host = getHostV2(*infraEnvID, *host.ID) + host = utils_test.TestContext.GetHostV2(*infraEnvID, *host.ID) Expect(host.Connectivity).Should(Equal(connectivity)) }) @@ -417,11 +418,11 @@ var _ = Describe("Host tests", func() { ) BeforeEach(func() { - h = ®isterHost(*infraEnvID).Host + h = &utils_test.TestContext.RegisterHost(*infraEnvID).Host }) getHostImageStatus := func(hostID strfmt.UUID, imageName string) *models.ContainerImageAvailability { - hostInDb := getHostV2(*infraEnvID, hostID) + hostInDb := utils_test.TestContext.GetHostV2(*infraEnvID, hostID) var hostImageStatuses map[string]*models.ContainerImageAvailability Expect(json.Unmarshal([]byte(hostInDb.ImagesStatus), &hostImageStatuses)).ShouldNot(HaveOccurred()) @@ -433,7 +434,7 @@ var _ = Describe("Host tests", func() { By("pull success", func() { imageStatus = common.TestImageStatusesSuccess - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{imageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{imageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(imageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationSuccess), models.HostValidationIDContainerImagesAvailable) }) @@ -448,7 +449,7 @@ var _ = Describe("Host tests", func() { Time: imageStatus.Time, } - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(expectedImageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationFailure), models.HostValidationIDContainerImagesAvailable) }) @@ -459,7 +460,7 @@ var _ = Describe("Host 
tests", func() { Result: models.ContainerImageAvailabilityResultSuccess, } - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(imageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationSuccess), models.HostValidationIDContainerImagesAvailable) }) @@ -475,7 +476,7 @@ var _ = Describe("Host tests", func() { Time: 1000000.0, } - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{imageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{imageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(imageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationFailure), models.HostValidationIDContainerImagesAvailable) }) @@ -490,7 +491,7 @@ var _ = Describe("Host tests", func() { Time: imageStatus.Time, } - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(expectedImageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationFailure), models.HostValidationIDContainerImagesAvailable) }) @@ -508,7 +509,7 @@ var _ = Describe("Host tests", func() { Time: imageStatus.Time, } - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, 
[]*models.ContainerImageAvailability{newImageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(expectedImageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationFailure), models.HostValidationIDContainerImagesAvailable) }) @@ -518,7 +519,7 @@ var _ = Describe("Host tests", func() { By("pull failed", func() { imageStatus = common.TestImageStatusesFailure - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{imageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{imageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(imageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationFailure), models.HostValidationIDContainerImagesAvailable) }) @@ -532,7 +533,7 @@ var _ = Describe("Host tests", func() { Time: imageStatus.Time, } - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) + utils_test.TestContext.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{newImageStatus}) Expect(getHostImageStatus(*h.ID, imageStatus.Name)).Should(Equal(expectedImageStatus)) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, string(serviceHost.ValidationSuccess), models.HostValidationIDContainerImagesAvailable) }) @@ -540,9 +541,9 @@ var _ = Describe("Host tests", func() { }) It("register_same_host_id", func() { - hostID := strToUUID(uuid.New().String()) + hostID := utils_test.StrToUUID(uuid.New().String()) // register to cluster1 - _, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: 
hostID, @@ -550,7 +551,7 @@ var _ = Describe("Host tests", func() { }) Expect(err).NotTo(HaveOccurred()) - cluster2, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster2, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("another-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -561,7 +562,7 @@ var _ = Describe("Host tests", func() { infraEnvID2 := registerInfraEnv(cluster2.GetPayload().ID, models.ImageTypeMinimalIso).ID // register to cluster2 - _, err = agentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID2, NewHostParams: &models.HostCreateParams{ HostID: hostID, @@ -570,18 +571,18 @@ var _ = Describe("Host tests", func() { Expect(err).NotTo(HaveOccurred()) // successfully get from both clusters - _ = getHostV2(*infraEnvID, *hostID) - h2 := getHostV2(*infraEnvID2, *hostID) + _ = utils_test.TestContext.GetHostV2(*infraEnvID, *hostID) + h2 := utils_test.TestContext.GetHostV2(*infraEnvID2, *hostID) h2initialRegistrationTimestamp := h2.RegisteredAt - _, err = userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: *infraEnvID, HostID: *hostID, }) Expect(err).NotTo(HaveOccurred()) time.Sleep(time.Second * 2) - _, err = agentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(ctx, &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID2, NewHostParams: &models.HostCreateParams{ HostID: hostID, @@ -590,12 +591,12 @@ var _ = Describe("Host tests", func() { Expect(err).NotTo(HaveOccurred()) 
// confirm if new registration updated the timestamp - h2 = getHostV2(*infraEnvID2, *hostID) + h2 = utils_test.TestContext.GetHostV2(*infraEnvID2, *hostID) h2newRegistrationTimestamp := h2.RegisteredAt Expect(h2newRegistrationTimestamp.Equal(h2initialRegistrationTimestamp)).Should(BeFalse()) Eventually(func() string { - h := getHostV2(*infraEnvID2, *hostID) + h := utils_test.TestContext.GetHostV2(*infraEnvID2, *hostID) return swag.StringValue(h.Status) }, "30s", "1s").Should(Equal(models.HostStatusDiscovering)) }) @@ -608,8 +609,8 @@ var _ = Describe("Host tests", func() { wrongTokenStubID, err := wiremock.createWrongStubTokenAuth(WrongPullSecret) Expect(err).ToNot(HaveOccurred()) - hostID := strToUUID(uuid.New().String()) - _, err = badAgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + hostID := utils_test.StrToUUID(uuid.New().String()) + _, err = utils_test.TestContext.BadAgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: *infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: hostID, @@ -622,7 +623,7 @@ var _ = Describe("Host tests", func() { }) It("next_step_runner_command", func() { - registration := registerHost(*infraEnvID) + registration := utils_test.TestContext.RegisterHost(*infraEnvID) Expect(registration.NextStepRunnerCommand).ShouldNot(BeNil()) Expect(registration.NextStepRunnerCommand.Command).Should(BeEmpty()) Expect(registration.NextStepRunnerCommand.Args).ShouldNot(BeEmpty()) @@ -631,7 +632,7 @@ var _ = Describe("Host tests", func() { }) func updateInventory(ctx context.Context, infraEnvId strfmt.UUID, hostId strfmt.UUID, inventory string) *models.Host { - _, err := agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ InfraEnvID: infraEnvId, HostID: hostId, Reply: &models.StepReply{ @@ -642,7 +643,7 @@ func 
updateInventory(ctx context.Context, infraEnvId strfmt.UUID, hostId strfmt. }, }) Expect(err).ShouldNot(HaveOccurred()) - host := getHostV2(infraEnvId, hostId) + host := utils_test.TestContext.GetHostV2(infraEnvId, hostId) Expect(host).NotTo(BeNil()) Expect(host.Inventory).NotTo(BeEmpty()) return host diff --git a/subsystem/host_v2_test.go b/subsystem/host_v2_test.go index 8b51ee6c390..a4e411ee6ef 100644 --- a/subsystem/host_v2_test.go +++ b/subsystem/host_v2_test.go @@ -14,6 +14,7 @@ import ( "github.com/openshift/assisted-service/client/installer" "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("Host tests v2", func() { @@ -25,19 +26,19 @@ var _ = Describe("Host tests v2", func() { BeforeEach(func() { var err error - infraEnv, err = userBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ + infraEnv, err = utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, }, }) Expect(err).NotTo(HaveOccurred()) infraEnvID = *infraEnv.GetPayload().ID - cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -49,25 +50,25 @@ var _ = Describe("Host tests v2", func() { }) It("host infra env CRUD", func() { - host := ®isterHost(infraEnvID).Host - host = getHostV2(infraEnvID, *host.ID) + 
host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(*host.Status).Should(Equal("discovering-unbound")) Expect(host.StatusUpdatedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) - list, err := userBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: infraEnvID}) + list, err := utils_test.TestContext.UserBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: infraEnvID}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(1)) - _, err = userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: infraEnvID, HostID: *host.ID, }) Expect(err).NotTo(HaveOccurred()) - list, err = userBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: infraEnvID}) + list, err = utils_test.TestContext.UserBMClient.Installer.V2ListHosts(ctx, &installer.V2ListHostsParams{InfraEnvID: infraEnvID}) Expect(err).NotTo(HaveOccurred()) Expect(len(list.GetPayload())).Should(Equal(0)) - _, err = userBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{ InfraEnvID: infraEnvID, HostID: *host.ID, }) @@ -75,17 +76,17 @@ var _ = Describe("Host tests v2", func() { }) It("infra-env host should reach know-unbound state", func() { - host := ®isterHost(infraEnvID).Host - host = getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) - waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, defaultWaitForHostStateTimeout, host) + waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, utils_test.DefaultWaitForHostStateTimeout, host) host = 
updateInventory(ctx, infraEnvID, *host.ID, defaultInventory()) - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, host) + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, host) }) It("update_hostname_successfully", func() { - host := ®isterHost(infraEnvID).Host - host = getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) host = updateInventory(ctx, infraEnvID, *host.ID, defaultInventory()) @@ -101,8 +102,8 @@ var _ = Describe("Host tests v2", func() { }) It("update node labels successfully", func() { - host := ®isterHost(infraEnvID).Host - host = getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) host = updateInventory(ctx, infraEnvID, *host.ID, defaultInventory()) @@ -126,8 +127,8 @@ var _ = Describe("Host tests v2", func() { }) It("update infra-env host installation disk id success", func() { - host := ®isterHost(infraEnvID).Host - host = getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) inventory, error := common.UnmarshalInventory(defaultInventory()) Expect(error).ToNot(HaveOccurred()) @@ -168,21 +169,21 @@ var _ = Describe("Host tests v2", func() { }, } - _, error = userBMClient.Installer.V2UpdateHost(ctx, diskSelectionRequest) + _, error = utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, diskSelectionRequest) Expect(error).ToNot(HaveOccurred()) }) It("register_same_host_id", func() { // register to infra-env 1 - host := ®isterHost(infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host hostID := *host.ID - infraEnv2, err := 
userBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ + infraEnv2, err := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("another test-infra-env"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, }, }) @@ -190,52 +191,52 @@ var _ = Describe("Host tests v2", func() { infraEnvID2 := *infraEnv2.GetPayload().ID // register to infra env2 - _ = registerHostByUUID(infraEnvID2, hostID) + _ = utils_test.TestContext.RegisterHostByUUID(infraEnvID2, hostID) // successfully get from both clusters - _ = getHostV2(infraEnvID, hostID) - _ = getHostV2(infraEnvID2, hostID) + _ = utils_test.TestContext.GetHostV2(infraEnvID, hostID) + _ = utils_test.TestContext.GetHostV2(infraEnvID2, hostID) - _, err = userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: infraEnvID, HostID: hostID, }) Expect(err).NotTo(HaveOccurred()) - h := getHostV2(infraEnvID2, hostID) + h := utils_test.TestContext.GetHostV2(infraEnvID2, hostID) // register again to cluster 2 and expect it to be in discovery status Expect(db.Model(h).Update("status", "known-unbound").Error).NotTo(HaveOccurred()) - h = getHostV2(infraEnvID2, hostID) + h = utils_test.TestContext.GetHostV2(infraEnvID2, hostID) Expect(swag.StringValue(h.Status)).Should(Equal("known-unbound")) - _ = registerHostByUUID(infraEnvID2, hostID) - h = getHostV2(infraEnvID2, hostID) + _ = utils_test.TestContext.RegisterHostByUUID(infraEnvID2, hostID) + h = utils_test.TestContext.GetHostV2(infraEnvID2, hostID) Expect(swag.StringValue(h.Status)).Should(Equal("discovering-unbound")) }) It("bind host", 
func() { - host := ®isterHost(infraEnvID).Host - host = getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) - waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, defaultWaitForHostStateTimeout, host) + waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, utils_test.DefaultWaitForHostStateTimeout, host) host = updateInventory(ctx, infraEnvID, *host.ID, defaultInventory()) - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, host) - host = bindHost(host.InfraEnvID, *host.ID, clusterID) + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, host) + host = utils_test.TestContext.BindHost(host.InfraEnvID, *host.ID, clusterID) Expect(host.ClusterID).NotTo(BeNil()) Expect(*host.ClusterID).Should(Equal(clusterID)) - waitForHostStateV2(ctx, models.HostStatusBinding, defaultWaitForHostStateTimeout, host) - steps := getNextSteps(host.InfraEnvID, *host.ID) + waitForHostStateV2(ctx, models.HostStatusBinding, utils_test.DefaultWaitForHostStateTimeout, host) + steps := utils_test.TestContext.GetNextSteps(host.InfraEnvID, *host.ID) Expect(len(steps.Instructions)).Should(Equal(0)) }) It("bind host in insufficient state should fail", func() { - host := ®isterHost(infraEnvID).Host - waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, defaultWaitForHostStateTimeout, host) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, utils_test.DefaultWaitForHostStateTimeout, host) By("move the host to insufficient") Expect(db.Model(host).UpdateColumns(&models.Host{Inventory: defaultInventory(), Status: swag.String(models.HostStatusInsufficient), InstallationDiskID: "wwn-0x1111111111111111111111"}).Error).NotTo(HaveOccurred()) By("reject host in insufficient state") - _, err := 
userBMClient.Installer.BindHost(context.Background(), &installer.BindHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.BindHost(context.Background(), &installer.BindHostParams{ HostID: *host.ID, InfraEnvID: infraEnvID, BindHostParams: &models.BindHostParams{ @@ -255,19 +256,19 @@ var _ = Describe("Day2 Host tests v2", func() { BeforeEach(func() { var err error - infraEnv, err = userBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ + infraEnv, err = utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeFullIso, }, }) Expect(err).NotTo(HaveOccurred()) infraEnvID = *infraEnv.GetPayload().ID - cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -280,28 +281,28 @@ var _ = Describe("Day2 Host tests v2", func() { }) It("bind host to day2 cluster", func() { - host := ®isterHost(infraEnvID).Host - host = getHostV2(infraEnvID, *host.ID) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + host = utils_test.TestContext.GetHostV2(infraEnvID, *host.ID) Expect(host).NotTo(BeNil()) Expect(*host.Status).Should(Equal("discovering-unbound")) Expect(host.StatusUpdatedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) - waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, defaultWaitForHostStateTimeout, host) + waitForHostStateV2(ctx, models.HostStatusDiscoveringUnbound, 
utils_test.DefaultWaitForHostStateTimeout, host) host = updateInventory(ctx, infraEnvID, *host.ID, defaultInventory()) - waitForHostStateV2(ctx, models.HostStatusKnownUnbound, defaultWaitForHostStateTimeout, host) + waitForHostStateV2(ctx, models.HostStatusKnownUnbound, utils_test.DefaultWaitForHostStateTimeout, host) - host = bindHost(infraEnvID, *host.ID, clusterID) + host = utils_test.TestContext.BindHost(infraEnvID, *host.ID, clusterID) Expect(swag.StringValue(host.Status)).Should(Equal("binding")) - host = ®isterHostByUUID(infraEnvID, *host.ID).Host - host = getHostV2(host.InfraEnvID, *host.ID) + host = &utils_test.TestContext.RegisterHostByUUID(infraEnvID, *host.ID).Host + host = utils_test.TestContext.GetHostV2(host.InfraEnvID, *host.ID) Expect(swag.StringValue(host.Status)).Should(Equal("discovering")) Expect(swag.StringValue(host.Kind)).Should(Equal(models.HostKindAddToExistingClusterHost)) }) }) func updateHostV2(ctx context.Context, request *installer.V2UpdateHostParams) *models.Host { - response, error := userBMClient.Installer.V2UpdateHost(ctx, request) + response, error := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, request) Expect(error).ShouldNot(HaveOccurred()) Expect(response).NotTo(BeNil()) Expect(response.Payload).NotTo(BeNil()) @@ -309,7 +310,7 @@ func updateHostV2(ctx context.Context, request *installer.V2UpdateHostParams) *m } func isHostInStateV2(ctx context.Context, host *models.Host, state string) (bool, string, string) { - rep, err := userBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{InfraEnvID: host.InfraEnvID, HostID: *host.ID}) + rep, err := utils_test.TestContext.UserBMClient.Installer.V2GetHost(ctx, &installer.V2GetHostParams{InfraEnvID: host.InfraEnvID, HostID: *host.ID}) Expect(err).NotTo(HaveOccurred()) h := rep.GetPayload() return swag.StringValue(h.Status) == state, swag.StringValue(h.Status), swag.StringValue(h.StatusInfo) diff --git a/subsystem/image_test.go b/subsystem/image_test.go index 
908dadac0f8..ce3a95dd622 100644 --- a/subsystem/image_test.go +++ b/subsystem/image_test.go @@ -18,6 +18,7 @@ import ( "github.com/openshift/assisted-service/client/versions" "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("system-test image tests", func() { @@ -27,7 +28,7 @@ var _ = Describe("system-test image tests", func() { ) BeforeEach(func() { - resp, err := userBMClient.Versions.V2ListSupportedOpenshiftVersions( + resp, err := utils_test.TestContext.UserBMClient.Versions.V2ListSupportedOpenshiftVersions( ctx, &versions.V2ListSupportedOpenshiftVersionsParams{OnlyLatest: swag.Bool(true)}, ) Expect(err).NotTo(HaveOccurred()) @@ -41,7 +42,7 @@ var _ = Describe("system-test image tests", func() { By(fmt.Sprintf("For version %s", ocpVersion)) By("Register Cluster") - registerResp, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerResp, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(ocpVersion), @@ -63,12 +64,12 @@ var _ = Describe("system-test image tests", func() { } config := common.FormatStaticConfigHostYAML("eth0", "eth1", "192.0.2.155", "192.0.2.156", "192.0.2.1", macInterfaceMap) - getResp, err := userBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ + getResp, err := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(ctx, &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("iso-test-infra-env"), OpenshiftVersion: ocpVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: imageType, StaticNetworkConfig: 
[]*models.HostStaticNetworkConfig{config}, ClusterID: &clusterID, @@ -94,7 +95,7 @@ var _ = Describe("system-test image tests", func() { }) func verifyEventExistence(ClusterID strfmt.UUID, message string) { - eventsReply, err := userBMClient.Events.V2ListEvents(context.TODO(), &events.V2ListEventsParams{ + eventsReply, err := utils_test.TestContext.UserBMClient.Events.V2ListEvents(context.TODO(), &events.V2ListEventsParams{ ClusterID: &ClusterID, }) Expect(err).NotTo(HaveOccurred()) diff --git a/subsystem/infra_env_test.go b/subsystem/infra_env_test.go index 19d1af261e4..e1d72b01687 100644 --- a/subsystem/infra_env_test.go +++ b/subsystem/infra_env_test.go @@ -21,6 +21,7 @@ import ( "github.com/openshift/assisted-service/internal/bminventory" "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var registerInfraEnv = func(clusterID *strfmt.UUID, imageType models.ImageType) *models.InfraEnv { @@ -36,12 +37,12 @@ var registerInfraEnvSpecificVersionAndArch = func(clusterID *strfmt.UUID, imageT } var internalRegisterInfraEnv = func(clusterID *strfmt.UUID, imageType models.ImageType, cpuArch, ocpVersion string) *models.InfraEnv { - request, err := userBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ + request, err := utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: ocpVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: imageType, ClusterID: clusterID, CPUArchitecture: cpuArch, @@ -65,12 +66,12 @@ func validateKernelArgs(infraEnv *models.InfraEnv, expectedKargs models.KernelAr var _ = Describe("Register InfraEnv- kernel arguments", 
func() { register := func(kernelArgs models.KernelArguments) (*installer.RegisterInfraEnvCreated, error) { - return userBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ + return utils_test.TestContext.UserBMClient.Installer.RegisterInfraEnv(context.Background(), &installer.RegisterInfraEnvParams{ InfraenvCreateParams: &models.InfraEnvCreateParams{ Name: swag.String("test-infra-env"), OpenshiftVersion: openshiftVersion, PullSecret: swag.String(pullSecret), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), ImageType: models.ImageTypeMinimalIso, KernelArguments: kernelArgs, }, @@ -123,7 +124,7 @@ var _ = Describe("Infra_Env", func() { BeforeEach(func() { infraEnv = registerInfraEnv(nil, models.ImageTypeFullIso) - clusterResp, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + clusterResp, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -142,7 +143,7 @@ var _ = Describe("Infra_Env", func() { }) getInfraEnv := func() { - resp, err := userBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: infraEnvID}) + resp, err := utils_test.TestContext.UserBMClient.Installer.GetInfraEnv(ctx, &installer.GetInfraEnvParams{InfraEnvID: infraEnvID}) Expect(err).NotTo(HaveOccurred()) infraEnv = resp.Payload @@ -168,7 +169,7 @@ var _ = Describe("Infra_Env", func() { KernelArguments: kargs1, }, } - res, err := userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + res, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).NotTo(HaveOccurred()) validateKernelArgs(res.Payload, kargs1) @@ -188,7 +189,7 @@ var _ = Describe("Infra_Env", func() { }, } 
updateParams.InfraEnvUpdateParams.KernelArguments = kargs2 - res, err = userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + res, err = utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).NotTo(HaveOccurred()) validateKernelArgs(res.Payload, kargs2) @@ -197,7 +198,7 @@ var _ = Describe("Infra_Env", func() { // Need to update with some field other than kernel arguments updateParams.InfraEnvUpdateParams.PullSecret = pullSecret - res, err = userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + res, err = utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).NotTo(HaveOccurred()) validateKernelArgs(res.Payload, kargs2) @@ -205,7 +206,7 @@ var _ = Describe("Infra_Env", func() { // Return to default updateParams.InfraEnvUpdateParams.PullSecret = "" updateParams.InfraEnvUpdateParams.KernelArguments = make(models.KernelArguments, 0) - res, err = userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + res, err = utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).NotTo(HaveOccurred()) validateKernelArgs(res.Payload, nil) }) @@ -222,7 +223,7 @@ var _ = Describe("Infra_Env", func() { }, }, } - _, err := userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + _, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).To(HaveOccurred()) }, Entry("unsupported replace operation", models.KernelArgumentOperationReplace, "p1"), @@ -239,7 +240,7 @@ var _ = Describe("Infra_Env", func() { Proxy: &models.Proxy{NoProxy: swag.String("*")}, }, } - res, err := userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + res, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).NotTo(HaveOccurred()) updateInfraEnv := res.Payload Expect(swag.StringValue(updateInfraEnv.Proxy.NoProxy)).To(Equal("*")) @@ -269,7 +270,7 @@ var _ = Describe("Infra_Env", func() { }, } - res, err := 
userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + res, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).NotTo(HaveOccurred()) updateInfraEnv := res.Payload Expect(updateInfraEnv.SSHAuthorizedKey).To(Equal(newSshKey)) @@ -281,7 +282,7 @@ var _ = Describe("Infra_Env", func() { }) It("download minimal-iso image success", func() { - _, err := userBMClient.Installer.UpdateInfraEnv(ctx, + _, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{InfraEnvID: infraEnvID, InfraEnvUpdateParams: &models.InfraEnvUpdateParams{ImageType: models.ImageTypeMinimalIso}}) Expect(err).NotTo(HaveOccurred()) @@ -291,7 +292,7 @@ var _ = Describe("Infra_Env", func() { It("download minimal-initrd success", func() { time.Sleep(time.Second * 10) - _, err := userBMClient.Installer.UpdateInfraEnv(ctx, + _, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, &installer.UpdateInfraEnvParams{InfraEnvID: infraEnvID, InfraEnvUpdateParams: &models.InfraEnvUpdateParams{ImageType: models.ImageTypeMinimalIso}}) Expect(err).NotTo(HaveOccurred()) @@ -300,14 +301,14 @@ var _ = Describe("Infra_Env", func() { log.Fatal(err) } defer os.Remove(file.Name()) - _, _, err = userBMClient.Installer.DownloadMinimalInitrd(ctx, &installer.DownloadMinimalInitrdParams{InfraEnvID: infraEnvID}, file) + _, _, err = utils_test.TestContext.UserBMClient.Installer.DownloadMinimalInitrd(ctx, &installer.DownloadMinimalInitrdParams{InfraEnvID: infraEnvID}, file) Expect(err).NotTo(HaveOccurred()) }) It("download infra-env files discovery ignition file", func() { file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadInfraEnvFiles(ctx, &installer.V2DownloadInfraEnvFilesParams{InfraEnvID: infraEnvID, FileName: "discovery.ign"}, file) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadInfraEnvFiles(ctx, 
&installer.V2DownloadInfraEnvFilesParams{InfraEnvID: infraEnvID, FileName: "discovery.ign"}, file) Expect(err).NotTo(HaveOccurred()) s, err := file.Stat() Expect(err).NotTo(HaveOccurred()) @@ -347,11 +348,11 @@ var _ = Describe("Infra_Env", func() { StaticNetworkConfig: staticNetworkConfigs, }, } - _, err := userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + _, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).NotTo(HaveOccurred()) By("Downloading the static network config archive") buf := &bytes.Buffer{} - _, err = userBMClient.Installer.V2DownloadInfraEnvFiles(ctx, &installer.V2DownloadInfraEnvFilesParams{InfraEnvID: infraEnvID, FileName: "static-network-config"}, buf) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadInfraEnvFiles(ctx, &installer.V2DownloadInfraEnvFilesParams{InfraEnvID: infraEnvID, FileName: "static-network-config"}, buf) Expect(err).NotTo(HaveOccurred()) By("Verifying the contents of the archive") contents := buf.String() @@ -371,12 +372,12 @@ var _ = Describe("Infra_Env", func() { It("download infra-env files invalid filename option", func() { file, err := os.CreateTemp("", "tmp") Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.V2DownloadInfraEnvFiles(ctx, &installer.V2DownloadInfraEnvFilesParams{InfraEnvID: infraEnvID, FileName: "bootstrap.ign"}, file) + _, err = utils_test.TestContext.UserBMClient.Installer.V2DownloadInfraEnvFiles(ctx, &installer.V2DownloadInfraEnvFilesParams{InfraEnvID: infraEnvID, FileName: "bootstrap.ign"}, file) Expect(err).Should(HaveOccurred()) }) It("can list infra-envs", func() { - resp, err := userBMClient.Installer.ListInfraEnvs(ctx, installer.NewListInfraEnvsParams()) + resp, err := utils_test.TestContext.UserBMClient.Installer.ListInfraEnvs(ctx, installer.NewListInfraEnvsParams()) Expect(err).NotTo(HaveOccurred()) Expect(len(resp.Payload)).To(Equal(2)) Expect(resp.Payload).To(ContainElement(infraEnv)) @@ -384,35 +385,35 
@@ var _ = Describe("Infra_Env", func() { }) It("can list infra-envs by cluster id", func() { - resp, err := userBMClient.Installer.ListInfraEnvs(ctx, &installer.ListInfraEnvsParams{ClusterID: &clusterID}) + resp, err := utils_test.TestContext.UserBMClient.Installer.ListInfraEnvs(ctx, &installer.ListInfraEnvsParams{ClusterID: &clusterID}) Expect(err).NotTo(HaveOccurred()) Expect(len(resp.Payload)).To(Equal(1)) Expect(resp.Payload[0]).To(Equal(infraEnv2)) }) It("deregister empty infra-env", func() { - _, err := userBMClient.Installer.DeregisterInfraEnv(ctx, &installer.DeregisterInfraEnvParams{InfraEnvID: infraEnvID}) + _, err := utils_test.TestContext.UserBMClient.Installer.DeregisterInfraEnv(ctx, &installer.DeregisterInfraEnvParams{InfraEnvID: infraEnvID}) Expect(err).NotTo(HaveOccurred()) }) It("deregister non-empty infra-env should fail", func() { - hostID := strToUUID(uuid.New().String()) + hostID := utils_test.StrToUUID(uuid.New().String()) // register to infra-env - _, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: hostID, }, }) Expect(err).NotTo(HaveOccurred()) - _, err = userBMClient.Installer.DeregisterInfraEnv(ctx, &installer.DeregisterInfraEnvParams{InfraEnvID: infraEnvID}) + _, err = utils_test.TestContext.UserBMClient.Installer.DeregisterInfraEnv(ctx, &installer.DeregisterInfraEnvParams{InfraEnvID: infraEnvID}) Expect(err).To(HaveOccurred()) }) It("can get ipxe script", func() { buf := &bytes.Buffer{} - _, err := userBMClient.Installer.V2DownloadInfraEnvFiles(ctx, &installer.V2DownloadInfraEnvFilesParams{InfraEnvID: infraEnvID, FileName: "ipxe-script"}, buf) + _, err := utils_test.TestContext.UserBMClient.Installer.V2DownloadInfraEnvFiles(ctx, &installer.V2DownloadInfraEnvFilesParams{InfraEnvID: 
infraEnvID, FileName: "ipxe-script"}, buf) Expect(err).NotTo(HaveOccurred()) script := buf.String() @@ -420,7 +421,7 @@ var _ = Describe("Infra_Env", func() { }) It("can get ipxe script presigned url", func() { - res, err := userBMClient.Installer.GetInfraEnvPresignedFileURL(ctx, &installer.GetInfraEnvPresignedFileURLParams{InfraEnvID: infraEnvID, FileName: "ipxe-script"}) + res, err := utils_test.TestContext.UserBMClient.Installer.GetInfraEnvPresignedFileURL(ctx, &installer.GetInfraEnvPresignedFileURLParams{InfraEnvID: infraEnvID, FileName: "ipxe-script"}) Expect(err).NotTo(HaveOccurred()) Expect(res.Payload).ToNot(BeNil()) u := res.Payload.URL @@ -435,7 +436,7 @@ var _ = Describe("Infra_Env", func() { Expect(script).To(HavePrefix("#!ipxe")) }) It("ipxe with boot control", func() { - res, err := userBMClient.Installer.GetInfraEnvPresignedFileURL(ctx, &installer.GetInfraEnvPresignedFileURLParams{ + res, err := utils_test.TestContext.UserBMClient.Installer.GetInfraEnvPresignedFileURL(ctx, &installer.GetInfraEnvPresignedFileURLParams{ InfraEnvID: infraEnvID, FileName: "ipxe-script", IpxeScriptType: swag.String(bminventory.BootOrderControl)}) @@ -466,9 +467,9 @@ var _ = Describe("Infra_Env", func() { Expect(string(script)).To(MatchRegexp(`.*initrd --name initrd.*`)) By("Create host") - hostID := strToUUID(uuid.New().String()) + hostID := utils_test.StrToUUID(uuid.New().String()) // register to infra-env - response, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + response, err := utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: hostID, @@ -476,7 +477,7 @@ var _ = Describe("Infra_Env", func() { }) Expect(err).ToNot(HaveOccurred()) host := &response.Payload.Host - generateHWPostStepReply(context.Background(), host, getValidWorkerHwInfoWithCIDR("1.2.3.4/24"), "h1") + 
utils_test.TestContext.GenerateHWPostStepReply(context.Background(), host, getValidWorkerHwInfoWithCIDR("1.2.3.4/24"), "h1") By("host is insufficient") scriptResp, err = http.Get(url) @@ -498,9 +499,9 @@ var _ = Describe("Infra_Env", func() { Expect(db.Model(&models.Host{}).Where("id = ? and infra_env_id = ?", hostID.String(), infraEnvID.String()). Update("status", models.HostStatusInsufficient).Error).ToNot(HaveOccurred()) - hostID2 := strToUUID(uuid.New().String()) + hostID2 := utils_test.StrToUUID(uuid.New().String()) // register to infra-env - response, err = agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + response, err = utils_test.TestContext.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ InfraEnvID: infraEnvID, NewHostParams: &models.HostCreateParams{ HostID: hostID2, @@ -508,7 +509,7 @@ var _ = Describe("Infra_Env", func() { }) Expect(err).ToNot(HaveOccurred()) host2 := &response.Payload.Host - generateHWPostStepReply(context.Background(), host2, getValidWorkerHwInfoWithCIDR("1.2.3.5/24"), "h2") + utils_test.TestContext.GenerateHWPostStepReply(context.Background(), host2, getValidWorkerHwInfoWithCIDR("1.2.3.5/24"), "h2") scriptResp, err = http.Get(url) Expect(err).NotTo(HaveOccurred()) Expect(scriptResp.StatusCode).To(Equal(http.StatusInternalServerError)) @@ -526,7 +527,7 @@ var _ = Describe("Infra_Env", func() { StaticNetworkConfig: staticNetworkConfigs, }, } - _, err := userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + _, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).To(HaveOccurred()) }) @@ -537,7 +538,7 @@ var _ = Describe("Infra_Env", func() { OpenshiftVersion: swag.String("5.99"), }, } - _, err := userBMClient.Installer.UpdateInfraEnv(ctx, updateParams) + _, err := utils_test.TestContext.UserBMClient.Installer.UpdateInfraEnv(ctx, updateParams) Expect(err).To(HaveOccurred()) }) }) diff --git 
a/subsystem/ipv6_test.go b/subsystem/ipv6_test.go index b8142c86615..9eeb9768999 100644 --- a/subsystem/ipv6_test.go +++ b/subsystem/ipv6_test.go @@ -14,6 +14,7 @@ import ( "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/internal/host/hostutil" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var ( @@ -46,7 +47,7 @@ var _ = Describe("IPv6 installation", func() { ) BeforeEach(func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 64}}, @@ -54,7 +55,7 @@ var _ = Describe("IPv6 installation", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(false), NetworkType: swag.String(models.ClusterNetworkTypeOVNKubernetes), }, @@ -67,7 +68,7 @@ var _ = Describe("IPv6 installation", func() { }) It("install_cluster IPv6 happy flow", func() { _ = registerHostsAndSetRolesV6(clusterID, *infraEnvID, 5) - clusterReply, getErr := userBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ + clusterReply, getErr := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ ClusterID: clusterID, }) Expect(getErr).ToNot(HaveOccurred()) @@ -83,12 +84,12 @@ var _ = Describe("IPv6 installation", func() { } for _, host := range c.Hosts { - updateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, host.InfraEnvID, models.HostStageDone) } - 
waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, defaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) + waitForClusterState(ctx, clusterID, models.ClusterStatusFinalizing, utils_test.DefaultWaitForClusterStateTimeout, clusterFinalizingStateInfo) By("Completing installation installation") - completeInstallationAndVerify(ctx, agentBMClient, clusterID, true) + completeInstallationAndVerify(ctx, utils_test.TestContext.AgentBMClient, clusterID, true) }) }) @@ -98,17 +99,17 @@ func registerHostsAndSetRolesV6(clusterID, infraEnvID strfmt.UUID, numHosts int) ips := hostutil.GenerateIPv6Addresses(numHosts, defaultCIDRv6) for i := 0; i < numHosts; i++ { hostname := fmt.Sprintf("h%d", i) - host := ®isterHost(infraEnvID).Host + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host validHwInfoV6.Interfaces[0].IPV6Addresses = []string{ips[i]} validHwInfoV6.Interfaces[0].MacAddress = "e6:53:3d:a7:77:b4" - generateEssentialHostStepsWithInventory(ctx, host, hostname, validHwInfoV6) + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, host, hostname, validHwInfoV6) var role models.HostRole if i < 3 { role = models.HostRoleMaster } else { role = models.HostRoleWorker } - _, err := userBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateHost(ctx, &installer.V2UpdateHostParams{ HostUpdateParams: &models.HostUpdateParams{ HostRole: swag.String(string(role)), }, @@ -121,7 +122,7 @@ func registerHostsAndSetRolesV6(clusterID, infraEnvID strfmt.UUID, numHosts int) generateFullMeshConnectivity(ctx, ips[0], hosts...) 
apiVip := "1001:db8::64" ingressVip := "1001:db8::65" - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ VipDhcpAllocation: swag.Bool(false), APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, @@ -131,7 +132,7 @@ func registerHostsAndSetRolesV6(clusterID, infraEnvID strfmt.UUID, numHosts int) }) Expect(err).NotTo(HaveOccurred()) if len(hosts) > 0 { - generateVerifyVipsPostStepReply(ctx, hosts[0], []string{apiVip}, []string{ingressVip}, models.VipVerificationSucceeded) + utils_test.TestContext.GenerateVerifyVipsPostStepReply(ctx, hosts[0], []string{apiVip}, []string{ingressVip}, models.VipVerificationSucceeded) } waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) diff --git a/subsystem/kubeapi/kubeapi_suite_test.go b/subsystem/kubeapi/kubeapi_suite_test.go new file mode 100644 index 00000000000..adf56bf774b --- /dev/null +++ b/subsystem/kubeapi/kubeapi_suite_test.go @@ -0,0 +1,180 @@ +package kubeapi + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "os" + "testing" + "time" + + "github.com/go-openapi/runtime" + "github.com/kelseyhightower/envconfig" + bmh_v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1" + "github.com/openshift/assisted-service/api/v1beta1" + "github.com/openshift/assisted-service/client" + "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/pkg/auth" + hivev1 "github.com/openshift/hive/apis/hive/v1" + "github.com/sirupsen/logrus" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "k8s.io/client-go/kubernetes/scheme" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "github.com/openshift/assisted-service/subsystem/utils_test" +) + +var log *logrus.Logger +var wiremock *utils_test.WireMock +var kubeClient k8sclient.Client +var VipAutoAllocOpenshiftVersion string = "4.14.0" +var pullSecret = "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"}}}" // #nosec + +var Options struct { + DBHost string `envconfig:"DB_HOST"` + DBPort string `envconfig:"DB_PORT"` + AuthType auth.AuthType `envconfig:"AUTH_TYPE"` + EnableOrgTenancy bool `envconfig:"ENABLE_ORG_TENANCY"` + FeatureGate bool `envconfig:"ENABLE_ORG_BASED_FEATURE_GATES"` + InventoryHost string `envconfig:"INVENTORY"` + TestToken string `envconfig:"TEST_TOKEN"` + TestToken2 string `envconfig:"TEST_TOKEN_2"` + TestTokenAdmin string `envconfig:"TEST_TOKEN_ADMIN"` + TestTokenUnallowed string `envconfig:"TEST_TOKEN_UNALLOWED"` + TestTokenClusterEditor string `envconfig:"TEST_TOKEN_EDITOR"` + OCMHost string `envconfig:"OCM_HOST"` + DeployTarget string `envconfig:"DEPLOY_TARGET" default:"k8s"` + Storage string `envconfig:"STORAGE" default:""` + Namespace string `envconfig:"NAMESPACE" default:"assisted-installer"` + EnableKubeAPI bool `envconfig:"ENABLE_KUBE_API" default:"false"` + DeregisterInactiveAfter time.Duration `envconfig:"DELETED_INACTIVE_AFTER" default:"480h"` // 20d + ReleaseSources string `envconfig:"RELEASE_SOURCES" default:""` +} + +const ( + pollDefaultInterval 
= 1 * time.Millisecond + pollDefaultTimeout = 30 * time.Second +) + +func clientcfg(authInfo runtime.ClientAuthInfoWriter) client.Config { + cfg := client.Config{ + URL: &url.URL{ + Scheme: client.DefaultSchemes[0], + Host: Options.InventoryHost, + Path: client.DefaultBasePath, + }, + } + if Options.AuthType != auth.TypeNone { + log.Info("API Key authentication enabled for subsystem tests") + cfg.AuthInfo = authInfo + } + return cfg +} + +func setupKubeClient() { + if addErr := v1beta1.AddToScheme(scheme.Scheme); addErr != nil { + logrus.Fatalf("Fail adding kubernetes v1beta1 scheme: %s", addErr) + } + if addErr := hivev1.AddToScheme(scheme.Scheme); addErr != nil { + logrus.Fatalf("Fail adding kubernetes hivev1 scheme: %s", addErr) + } + if addErr := hiveext.AddToScheme(scheme.Scheme); addErr != nil { + logrus.Fatalf("Fail adding kubernetes hiveext scheme: %s", addErr) + } + if addErr := bmh_v1alpha1.AddToScheme(scheme.Scheme); addErr != nil { + logrus.Fatalf("Fail adding kubernetes bmh scheme: %s", addErr) + } + + var err error + kubeClient, err = k8sclient.New(config.GetConfigOrDie(), k8sclient.Options{Scheme: scheme.Scheme}) + if err != nil { + logrus.Fatalf("Fail adding kubernetes client: %s", err) + } +} + +func init() { + var err error + log = logrus.New() + log.SetReportCaller(true) + err = envconfig.Process("subsystem", &Options) + if err != nil { + log.Fatal(err.Error()) + } + userClientCfg := clientcfg(auth.UserAuthHeaderWriter("bearer " + Options.TestToken)) + agentClientCfg := clientcfg(auth.AgentAuthHeaderWriter(utils_test.FakePS)) + + db, err := gorm.Open(postgres.Open(fmt.Sprintf("host=%s port=%s user=admin database=installer password=admin sslmode=disable", + Options.DBHost, Options.DBPort)), &gorm.Config{}) + if err != nil { + logrus.Fatal("Fail to connect to DB, ", err) + } + + if Options.EnableKubeAPI { + setupKubeClient() + } + + utils_test.TestContext = utils_test.NewSubsystemTestContext( + log, + db, + client.New(agentClientCfg),
client.New(userClientCfg), + nil, + nil, + nil, + nil, + nil, + nil, + pollDefaultInterval, + pollDefaultTimeout, + VipAutoAllocOpenshiftVersion, + ) + + if Options.AuthType == auth.TypeRHSSO { + releaseSourcesString := os.Getenv("RELEASE_SOURCES") + var releaseSources = models.ReleaseSources{} + if err := json.Unmarshal([]byte(releaseSourcesString), &releaseSources); err != nil { + logrus.Fatal("Fail to parse release sources, ", err) + } + + wiremock = &utils_test.WireMock{ + OCMHost: Options.OCMHost, + TestToken: Options.TestToken, + ReleaseSources: releaseSources, + } + + err := wiremock.DeleteAllWiremockStubs() + if err != nil { + logrus.Fatal("Fail to delete all wiremock stubs, ", err) + } + + if err = wiremock.CreateWiremockStubsForOCM(); err != nil { + logrus.Fatal("Failed to init wiremock stubs, ", err) + } + } +} + +func TestSubsystem(t *testing.T) { + AfterEach(func() { + subsystemAfterEach(utils_test.TestContext) + }) + + RegisterFailHandler(Fail) + subsystemAfterEach(utils_test.TestContext) // make sure we start tests from scratch + RunSpecs(t, "Subsystem KubeAPI Suite") +} + +func subsystemAfterEach(testContext *utils_test.SubsystemTestContext) { + if utils_test.Options.EnableKubeAPI { + printCRs(context.Background(), kubeClient) + cleanUpCRs(context.Background(), kubeClient) + verifyCleanUP(context.Background(), kubeClient) + } else { + testContext.DeregisterResources() + } + testContext.ClearDB() +} diff --git a/subsystem/kubeapi_test.go b/subsystem/kubeapi/kubeapi_test.go similarity index 87% rename from subsystem/kubeapi_test.go rename to subsystem/kubeapi/kubeapi_test.go index dc827b9d832..a7f804c097e 100644 --- a/subsystem/kubeapi_test.go +++ b/subsystem/kubeapi/kubeapi_test.go @@ -1,4 +1,4 @@ -package subsystem +package kubeapi import ( "bytes" @@ -56,6 +56,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + 
"github.com/openshift/assisted-service/subsystem/utils_test" ) const ( @@ -142,22 +143,23 @@ var ( "openshift-v4.14.0": "quay.io/openshift-release-dev/ocp-release:4.14.0-ec.4-x86_64", "openshift-v4.18.0-ec.2": "quay.io/openshift-release-dev/ocp-release:4.18.0-ec.2-x86_64", } + db = utils_test.TestContext.GetDB() ) func deployLocalObjectSecretIfNeeded(ctx context.Context, client k8sclient.Client) *corev1.LocalObjectReference { err := client.Get( ctx, - types.NamespacedName{Namespace: Options.Namespace, Name: pullSecretName}, + types.NamespacedName{Namespace: utils_test.Options.Namespace, Name: utils_test.PullSecretName}, &corev1.Secret{}, ) if apierrors.IsNotFound(err) { data := map[string]string{corev1.DockerConfigJsonKey: pullSecret} - deploySecret(ctx, kubeClient, pullSecretName, data) + deploySecret(ctx, kubeClient, utils_test.PullSecretName, data) } else { Expect(err).To(BeNil()) } return &corev1.LocalObjectReference{ - Name: pullSecretName, + Name: utils_test.PullSecretName, } } @@ -165,7 +167,7 @@ func deployOrUpdateConfigMap(ctx context.Context, client k8sclient.Client, name c := &corev1.ConfigMap{} err := client.Get( ctx, - types.NamespacedName{Namespace: Options.Namespace, Name: name}, + types.NamespacedName{Namespace: utils_test.Options.Namespace, Name: name}, c, ) if apierrors.IsNotFound(err) { @@ -175,7 +177,7 @@ func deployOrUpdateConfigMap(ctx context.Context, client k8sclient.Client, name APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: name, }, Data: data, @@ -309,7 +311,7 @@ func deploySecret(ctx context.Context, client k8sclient.Client, secretName strin APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: secretName, }, StringData: secretData, @@ -324,7 +326,7 @@ func deleteSecret(ctx context.Context, client k8sclient.Client, secretName strin APIVersion: "v1", }, ObjectMeta: 
metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: secretName, }, }) @@ -338,7 +340,7 @@ func updateSecret(ctx context.Context, client k8sclient.Client, secretName strin APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: secretName, }, StringData: secretData, @@ -354,7 +356,7 @@ func deployAgentClusterInstallCRD(ctx context.Context, client k8sclient.Client, APIVersion: "hiveextension/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterAgentClusterInstallName, }, Spec: *spec, @@ -363,14 +365,14 @@ func deployAgentClusterInstallCRD(ctx context.Context, client k8sclient.Client, } func deployClusterDeploymentCRD(ctx context.Context, client k8sclient.Client, spec *hivev1.ClusterDeploymentSpec) { - GinkgoLogger(fmt.Sprintf("test '%s' creating cluster deployment '%s'", GinkgoT().Name(), spec.ClusterName)) + utils_test.GinkgoLogger(fmt.Sprintf("test '%s' creating cluster deployment '%s'", GinkgoT().Name(), spec.ClusterName)) err := client.Create(ctx, &hivev1.ClusterDeployment{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterDeployment", APIVersion: getAPIVersion(), }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: spec.ClusterName, }, Spec: *spec, @@ -385,7 +387,7 @@ func deployBMHCRD(ctx context.Context, client k8sclient.Client, id string, spec APIVersion: "metal3.io/v1alpha1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: createBMHCRDNameFromID(id), }, Spec: *spec, @@ -415,7 +417,7 @@ func deployPPICRD(ctx context.Context, client k8sclient.Client, name string, spe APIVersion: "metal3.io/v1alpha1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: name, }, Spec: *spec, @@ 
-441,7 +443,7 @@ func deployClusterImageSetCRD(ctx context.Context, client k8sclient.Client, imag APIVersion: getAPIVersion(), }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: imageSetRef.Name, }, Spec: hivev1.ClusterImageSetSpec{ @@ -458,7 +460,7 @@ func deployInfraEnvCRD(ctx context.Context, client k8sclient.Client, name string APIVersion: getAPIVersion(), }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: name, }, Spec: *spec, @@ -474,7 +476,7 @@ func deployNMStateConfigCRD(ctx context.Context, client k8sclient.Client, name s APIVersion: getAPIVersion(), }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: name, Labels: map[string]string{NMStateLabelName: NMStateLabelValue}, }, @@ -492,7 +494,7 @@ func deployAgentCRD(ctx context.Context, client k8sclient.Client, APIVersion: getAPIVersion(), }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: name, Labels: map[string]string{ controllers.BMH_INFRA_ENV_LABEL: infraNsName.Name, @@ -654,7 +656,7 @@ func configureLocalAgentClient(infraEnvID string) { } key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: "assisted-installer-local-auth-key", } secret := getSecret(context.Background(), kubeClient, key) @@ -662,12 +664,12 @@ func configureLocalAgentClient(infraEnvID string) { tok, err := gencrypto.LocalJWTForKey(infraEnvID, string(privKeyPEM), gencrypto.InfraEnvKey) Expect(err).To(BeNil()) - agentBMClient = client.New(clientcfg(auth.AgentAuthHeaderWriter(tok))) + utils_test.TestContext.AgentBMClient = client.New(clientcfg(auth.AgentAuthHeaderWriter(tok))) } func checkAgentCondition(ctx context.Context, hostId string, conditionType conditionsv1.ConditionType, reason string) { hostkey := types.NamespacedName{ - 
Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: hostId, } Eventually(func() string { @@ -680,13 +682,13 @@ func checkAgentCondition(ctx context.Context, hostId string, conditionType condi } func registerIPv6MasterNode(ctx context.Context, infraEnvID strfmt.UUID, name, ip string) *models.Host { - host := ®isterHost(infraEnvID).Host - validHwInfoV6.Interfaces[0].IPV6Addresses = []string{ip} - generateEssentialHostStepsWithInventory(ctx, host, name, validHwInfoV6) - generateEssentialPrepareForInstallationSteps(ctx, host) + host := &utils_test.TestContext.RegisterHost(infraEnvID).Host + utils_test.ValidHwInfoV6.Interfaces[0].IPV6Addresses = []string{ip} + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(ctx, host, name, utils_test.ValidHwInfoV6) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, host) //update role as master hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -762,7 +764,7 @@ func getDefaultAgentClusterInstallSpec(clusterDeploymentName string) *hiveext.Ag ServiceNetwork: []string{"172.30.0.0/16"}, NetworkType: models.ClusterNetworkTypeOpenShiftSDN, }, - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, ImageSetRef: &hivev1.ClusterImageSetReference{Name: clusterImageSetName}, ProvisionRequirements: hiveext.ProvisionRequirements{ ControlPlaneAgents: 3, @@ -786,7 +788,7 @@ func getDefaultNonePlatformAgentClusterInstallSpec(clusterDeploymentName string) NetworkType: models.ClusterNetworkTypeOpenShiftSDN, UserManagedNetworking: swag.Bool(true), }, - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, ImageSetRef: &hivev1.ClusterImageSetReference{Name: clusterImageSetName}, ProvisionRequirements: hiveext.ProvisionRequirements{ ControlPlaneAgents: 3, @@ -813,7 +815,7 @@ func getDefaultExternalPlatformAgentClusterInstallSpec(clusterDeploymentName 
str PlatformName: "oci", CloudControllerManager: "External", }, - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, ImageSetRef: &hivev1.ClusterImageSetReference{Name: clusterImageSetName}, ProvisionRequirements: hiveext.ProvisionRequirements{ ControlPlaneAgents: 3, @@ -834,7 +836,7 @@ func getDefaultSNOAgentClusterInstallSpec(clusterDeploymentName string) *hiveext ServiceNetwork: []string{"172.30.0.0/16"}, NetworkType: models.ClusterNetworkTypeOVNKubernetes, }, - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, ImageSetRef: &hivev1.ClusterImageSetReference{Name: clusterImageSetName}, ProvisionRequirements: hiveext.ProvisionRequirements{ ControlPlaneAgents: 1, @@ -857,7 +859,7 @@ func getDefaultAgentClusterIPv6InstallSpec(clusterDeploymentName string) *hiveex ServiceNetwork: []string{"2003:db8::/112"}, NetworkType: models.ClusterNetworkTypeOVNKubernetes, }, - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, ImageSetRef: &hivev1.ClusterImageSetReference{Name: clusterImageSetName}, ProvisionRequirements: hiveext.ProvisionRequirements{ ControlPlaneAgents: 3, @@ -874,10 +876,10 @@ func getDefaultInfraEnvSpec(secretRef *corev1.LocalObjectReference, return &v1beta1.InfraEnvSpec{ ClusterRef: &v1beta1.ClusterReference{ Name: clusterDeployment.ClusterName, - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, }, PullSecretRef: secretRef, - SSHAuthorizedKey: sshPublicKey, + SSHAuthorizedKey: utils_test.SshPublicKey, } } @@ -896,7 +898,7 @@ func getDefaultAgentSpec(clusterDeployment *hivev1.ClusterDeploymentSpec, return &v1beta1.AgentSpec{ ClusterDeploymentName: &v1beta1.ClusterReference{ Name: clusterDeployment.ClusterName, - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, }, Approved: true, Role: role, @@ -938,32 +940,32 @@ func printCRs(ctx context.Context, client k8sclient.Client) { PreprovisioningImageList metal3_v1alpha1.PreprovisioningImageList ) - multiErr = 
multierror.Append(multiErr, client.List(ctx, &agentList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("Agent", agentList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &agentList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("Agent", agentList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &aciList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("AgentClusterInstall", aciList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &aciList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("AgentClusterInstall", aciList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &clusterDeploymentList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("ClusterDeployment", clusterDeploymentList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &clusterDeploymentList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("ClusterDeployment", clusterDeploymentList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &clusterImageSetList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("ClusterImageSet", clusterImageSetList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &clusterImageSetList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("ClusterImageSet", clusterImageSetList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &infraEnvList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("InfraEnv", infraEnvList)) 
+ multiErr = multierror.Append(multiErr, client.List(ctx, &infraEnvList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("InfraEnv", infraEnvList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &nmStateConfigList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("NMStateConfig", nmStateConfigList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &nmStateConfigList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("NMStateConfig", nmStateConfigList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &classificationList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("AgentClassification", classificationList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &classificationList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("AgentClassification", classificationList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &bareMetalHostList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("BareMetalHost", bareMetalHostList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &bareMetalHostList, k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("BareMetalHost", bareMetalHostList)) - multiErr = multierror.Append(multiErr, client.List(ctx, &PreprovisioningImageList, k8sclient.InNamespace(Options.Namespace))) - multiErr = multierror.Append(multiErr, GinkgoResourceLogger("PreprovisioningImage", PreprovisioningImageList)) + multiErr = multierror.Append(multiErr, client.List(ctx, &PreprovisioningImageList, 
k8sclient.InNamespace(utils_test.Options.Namespace))) + multiErr = multierror.Append(multiErr, utils_test.GinkgoResourceLogger("PreprovisioningImage", PreprovisioningImageList)) Expect(multiErr.ErrorOrNil()).To(BeNil()) } @@ -971,33 +973,33 @@ func printCRs(ctx context.Context, client k8sclient.Client) { func cleanUpCRs(ctx context.Context, client k8sclient.Client) { Eventually(func() error { - return client.DeleteAllOf(ctx, &hivev1.ClusterDeployment{}, k8sclient.InNamespace(Options.Namespace)) // Should also delete all agents + return client.DeleteAllOf(ctx, &hivev1.ClusterDeployment{}, k8sclient.InNamespace(utils_test.Options.Namespace)) // Should also delete all agents }, "1m", "2s").Should(BeNil()) Eventually(func() error { - return client.DeleteAllOf(ctx, &hivev1.ClusterImageSet{}, k8sclient.InNamespace(Options.Namespace)) + return client.DeleteAllOf(ctx, &hivev1.ClusterImageSet{}, k8sclient.InNamespace(utils_test.Options.Namespace)) }, "1m", "2s").Should(BeNil()) Eventually(func() error { - return client.DeleteAllOf(ctx, &v1beta1.InfraEnv{}, k8sclient.InNamespace(Options.Namespace)) + return client.DeleteAllOf(ctx, &v1beta1.InfraEnv{}, k8sclient.InNamespace(utils_test.Options.Namespace)) }, "1m", "2s").Should(BeNil()) Eventually(func() error { - return client.DeleteAllOf(ctx, &v1beta1.NMStateConfig{}, k8sclient.InNamespace(Options.Namespace)) + return client.DeleteAllOf(ctx, &v1beta1.NMStateConfig{}, k8sclient.InNamespace(utils_test.Options.Namespace)) }, "1m", "2s").Should(BeNil()) Eventually(func() error { - return client.DeleteAllOf(ctx, &v1beta1.AgentClassification{}, k8sclient.InNamespace(Options.Namespace)) + return client.DeleteAllOf(ctx, &v1beta1.AgentClassification{}, k8sclient.InNamespace(utils_test.Options.Namespace)) }, "1m", "2s").Should(BeNil()) Eventually(func() error { - return client.DeleteAllOf(ctx, &v1beta1.Agent{}, k8sclient.InNamespace(Options.Namespace)) + return client.DeleteAllOf(ctx, &v1beta1.Agent{}, 
k8sclient.InNamespace(utils_test.Options.Namespace)) }, "1m", "2s").Should(BeNil()) Eventually(func() error { - return client.DeleteAllOf(ctx, &metal3_v1alpha1.PreprovisioningImage{}, k8sclient.InNamespace(Options.Namespace)) + return client.DeleteAllOf(ctx, &metal3_v1alpha1.PreprovisioningImage{}, k8sclient.InNamespace(utils_test.Options.Namespace)) }, "1m", "2s").Should(BeNil()) Eventually(func() error { - return client.DeleteAllOf(ctx, &metal3_v1alpha1.BareMetalHost{}, k8sclient.InNamespace(Options.Namespace)) + return client.DeleteAllOf(ctx, &metal3_v1alpha1.BareMetalHost{}, k8sclient.InNamespace(utils_test.Options.Namespace)) }, "1m", "2s").Should(BeNil()) Eventually(func() error { var err error bareMetalHostList := &metal3_v1alpha1.BareMetalHostList{} - err = client.List(ctx, bareMetalHostList, k8sclient.InNamespace(Options.Namespace)) + err = client.List(ctx, bareMetalHostList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) // Remove finalizers to avoid stuck BMHs funk.ForEach(bareMetalHostList.Items, func(bmh metal3_v1alpha1.BareMetalHost) { @@ -1015,15 +1017,15 @@ func cleanUpCRs(ctx context.Context, client k8sclient.Client) { APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, - Name: pullSecretName, + Namespace: utils_test.Options.Namespace, + Name: utils_test.PullSecretName, }, Type: corev1.SecretTypeDockerConfigJson, } psKey := types.NamespacedName{ - Namespace: Options.Namespace, - Name: pullSecretName, + Namespace: utils_test.Options.Namespace, + Name: utils_test.PullSecretName, } err := kubeClient.Get(ctx, psKey, &corev1.Secret{}) @@ -1054,7 +1056,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify ClusterDeployment Cleanup") Eventually(func() int { clusterDeploymentList := &hivev1.ClusterDeploymentList{} - err := client.List(ctx, clusterDeploymentList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, clusterDeploymentList, 
k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(clusterDeploymentList.Items) }, "1m", "20s").Should(Equal(0)) @@ -1062,7 +1064,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify AgentClusterInstall Cleanup") Eventually(func() int { aciList := &hiveext.AgentClusterInstallList{} - err := client.List(ctx, aciList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, aciList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(aciList.Items) }, "1m", "20s").Should(Equal(0)) @@ -1070,7 +1072,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify ClusterImageSet Cleanup") Eventually(func() int { clusterImageSetList := &hivev1.ClusterImageSetList{} - err := client.List(ctx, clusterImageSetList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, clusterImageSetList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(clusterImageSetList.Items) }, "2m", "2s").Should(Equal(0)) @@ -1078,7 +1080,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify InfraEnv Cleanup") Eventually(func() int { infraEnvList := &v1beta1.InfraEnvList{} - err := client.List(ctx, infraEnvList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, infraEnvList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(infraEnvList.Items) }, "2m", "2s").Should(Equal(0)) @@ -1086,7 +1088,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify NMStateConfig Cleanup") Eventually(func() int { nmStateConfigList := &v1beta1.NMStateConfigList{} - err := client.List(ctx, nmStateConfigList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, nmStateConfigList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(nmStateConfigList.Items) }, "2m", 
"10s").Should(Equal(0)) @@ -1094,7 +1096,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify Agent Cleanup") Eventually(func() int { agentList := &v1beta1.AgentList{} - err := client.List(ctx, agentList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, agentList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(agentList.Items) }, "2m", "2s").Should(Equal(0)) @@ -1102,7 +1104,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify AgentClassification Cleanup") Eventually(func() int { classificationList := &v1beta1.AgentClassificationList{} - err := client.List(ctx, classificationList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, classificationList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(classificationList.Items) }, "2m", "2s").Should(Equal(0)) @@ -1110,7 +1112,7 @@ func verifyCleanUP(ctx context.Context, client k8sclient.Client) { By("Verify BareMetalHost Cleanup") Eventually(func() int { bareMetalHostList := &metal3_v1alpha1.BareMetalHostList{} - err := client.List(ctx, bareMetalHostList, k8sclient.InNamespace(Options.Namespace)) + err := client.List(ctx, bareMetalHostList, k8sclient.InNamespace(utils_test.Options.Namespace)) Expect(err).To(BeNil()) return len(bareMetalHostList.Items) }, "2m", "2s").Should(Equal(0)) @@ -1161,7 +1163,7 @@ func createBMHCRDNameFromID(id string) string { const waitForReconcileTimeout = 30 -var _ = Describe("[kube-api]cluster installation", func() { +var _ = Describe("cluster installation", func() { if !Options.EnableKubeAPI { return } @@ -1192,7 +1194,7 @@ var _ = Describe("[kube-api]cluster installation", func() { infraNsName = types.NamespacedName{ Name: "infraenv" + randomNameSuffix(), - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } infraEnvSpec = getDefaultInfraEnvSpec(secretRef, clusterDeploymentSpec) }) @@ -1219,7 
+1221,7 @@ var _ = Describe("[kube-api]cluster installation", func() { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: providedMirrorRegistryCMName, - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, }, Data: data, } @@ -1246,7 +1248,7 @@ location = "%s" It("Failed to create the image registry configurations", func() { aciSpec.MirrorRegistryRef = &hiveext.MirrorRegistryConfigMapReference{ Name: providedMirrorRegistryCMName, - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) @@ -1254,7 +1256,7 @@ location = "%s" var aci *hiveext.AgentClusterInstall Eventually(func() bool { aci = getAgentClusterInstallCRD(ctx, kubeClient, types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, }) @@ -1272,7 +1274,7 @@ location = "%s" cm := corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: providedMirrorRegistryCMName, - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, }, } Expect(kubeClient.Delete(ctx, &cm)).ShouldNot(HaveOccurred()) @@ -1282,13 +1284,13 @@ location = "%s" It("Successfully creates the image registry configurations", func() { aciSpec.MirrorRegistryRef = &hiveext.MirrorRegistryConfigMapReference{ Name: providedMirrorRegistryCMName, - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } createUserMirrorRegistryConfigmap(kubeClient, getSecureRegistryToml(), mirrorRegistryCertificate) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } @@ -1300,7 +1302,7 @@ location = "%s" dbCluster = getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) aci = getAgentClusterInstallCRD(ctx, kubeClient, types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, 
Name: clusterDeploymentSpec.ClusterInstallRef.Name, }) return aci != nil && dbCluster != nil && dbCluster.MirrorRegistryConfiguration != "" @@ -1335,7 +1337,7 @@ location = "%s" By("Verify full-iso requested on creation of infra-env") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } var infraEnv *common.InfraEnv @@ -1349,7 +1351,7 @@ location = "%s" Expect(err).ToNot(HaveOccurred()) Eventually(func() bool { infraEnv = getInfraEnvFromDBByKubeKey(ctx, db, types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, }, waitForReconcileTimeout) return string(*infraEnv.InfraEnv.Type) == "minimal-iso" @@ -1361,7 +1363,7 @@ location = "%s" err = kubeClient.Update(ctx, infraEnvCR) Expect(err).ToNot(HaveOccurred()) infraEnvKey = types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } Eventually(func() bool { @@ -1380,7 +1382,7 @@ location = "%s" By("Verify default image type requested on creation of infra-env") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } var infraEnv *common.InfraEnv @@ -1397,7 +1399,7 @@ location = "%s" Eventually(func() bool { infraEnv = getInfraEnvFromDBByKubeKey(ctx, db, types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, }, waitForReconcileTimeout) return *infraEnv.InfraEnv.Type == models.ImageTypeFullIso @@ -1406,8 +1408,8 @@ location = "%s" It("Pull Secret validation error", func() { By("setting pull secret with wrong data") - updateSecret(ctx, kubeClient, pullSecretName, map[string]string{ - corev1.DockerConfigJsonKey: WrongPullSecret}) + updateSecret(ctx, kubeClient, utils_test.PullSecretName, map[string]string{ + corev1.DockerConfigJsonKey: utils_test.WrongPullSecret}) 
By("Create cluster") deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) @@ -1416,7 +1418,7 @@ location = "%s" By("verify conditions") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, @@ -1434,28 +1436,28 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciV6Spec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) By("register hosts") configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv6Addresses(3, defaultCIDRv6) + ips := hostutil.GenerateIPv6Addresses(3, utils_test.DefaultCIDRv6) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) host := registerIPv6MasterNode(ctx, *infraEnv.ID, hostname, ips[i]) hosts = append(hosts, host) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) - generateVerifyVipsPostStepReply(ctx, hosts[0], []string{aciV6Spec.APIVIP}, []string{aciV6Spec.IngressVIP}, models.VipVerificationSucceeded) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) 
+ utils_test.TestContext.GenerateVerifyVipsPostStepReply(ctx, hosts[0], []string{aciV6Spec.APIVIP}, []string{aciV6Spec.IngressVIP}, models.VipVerificationSucceeded) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("verify validations are successfull") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterValidatedCondition, hiveext.ClusterValidationsPassingReason) @@ -1486,7 +1488,7 @@ location = "%s" By("Spec Sync should fail with SDN Configuration") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterSpecSyncedCondition, hiveext.ClusterInputErrorReason) @@ -1513,40 +1515,40 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, *infraEnv.ID, 
hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, hostname, ips[i]) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } agent := getAgentCRD(ctx, kubeClient, hostkey) Expect(agent.Status.ValidationsInfo).ToNot(BeNil()) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -1556,7 +1558,7 @@ location = "%s" }, "30s", "10s").Should(BeNil()) } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } By("verify default platform type status") @@ -1591,40 +1593,40 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv4Addresses(3, 
defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNodeWithInventory(ctx, *infraEnv.ID, hostname, ips[i], getDefaultNutanixInventory(ips[i])) + host := utils_test.TestContext.RegisterNodeWithInventory(ctx, *infraEnv.ID, hostname, ips[i], utils_test.TestContext.GetDefaultNutanixInventory(ips[i])) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } agent := getAgentCRD(ctx, kubeClient, hostkey) Expect(agent.Status.ValidationsInfo).ToNot(BeNil()) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -1634,7 +1636,7 @@ location = "%s" }, "30s", "10s").Should(BeNil()) } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } By("verify nutanix platform type status and spec") @@ -1713,41 +1715,41 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSpecExternalPlatform, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraEnvKey := 
types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNodeWithInventory(ctx, *infraEnv.ID, hostname, ips[i], getDefaultExternalInventory(ips[i])) + host := utils_test.TestContext.RegisterNodeWithInventory(ctx, *infraEnv.ID, hostname, ips[i], utils_test.TestContext.GetDefaultExternalInventory(ips[i])) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } agent := getAgentCRD(ctx, kubeClient, hostkey) Expect(agent.Status.ValidationsInfo).ToNot(BeNil()) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) 
for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") - generateCommonDomainReply(ctx, h, clusterDeploymentSpec.ClusterName, clusterDeploymentSpec.BaseDomain) + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateCommonDomainReply(ctx, h, clusterDeploymentSpec.ClusterName, clusterDeploymentSpec.BaseDomain) } By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -1757,7 +1759,7 @@ location = "%s" }, "30s", "10s").Should(BeNil()) } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } By("verify external platform type status and spec") @@ -1788,40 +1790,40 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNodeWithInventory(ctx, *infraEnv.ID, hostname, ips[i], getDefaultVmwareInventory(ips[i])) + host := utils_test.TestContext.RegisterNodeWithInventory(ctx, *infraEnv.ID, hostname, ips[i], 
utils_test.TestContext.GetDefaultVmwareInventory(ips[i])) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } agent := getAgentCRD(ctx, kubeClient, hostkey) Expect(agent.Status.ValidationsInfo).ToNot(BeNil()) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -1831,7 +1833,7 @@ location = "%s" }, "30s", "10s").Should(BeNil()) } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } By("verify vsphere platform type status and spec") @@ -1859,41 +1861,41 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpecNonePlatform, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - 
ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, *infraEnv.ID, hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, hostname, ips[i]) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } agent := getAgentCRD(ctx, kubeClient, hostkey) Expect(agent.Status.ValidationsInfo).ToNot(BeNil()) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") - generateCommonDomainReply(ctx, h, clusterDeploymentSpec.ClusterName, clusterDeploymentSpec.BaseDomain) + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateCommonDomainReply(ctx, h, clusterDeploymentSpec.ClusterName, clusterDeploymentSpec.BaseDomain) } By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -1903,7 +1905,7 @@ location = "%s" }, "30s", "10s").Should(BeNil()) } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } By("Verify ClusterDeployment ReadyForInstallation") @@ -1936,13 +1938,13 @@ location = "%s" aciSNOSpec.IgnitionEndpoint = &hiveext.IgnitionEndpoint{ Url: "https://example.com", CaCertificateReference: 
&hiveext.CaCertificateReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: caCertificateSecretName, }, } deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } b64Ca := b64.StdEncoding.EncodeToString([]byte(caCertificate)) @@ -1951,7 +1953,7 @@ location = "%s" return dbCluster != nil && *dbCluster.IgnitionEndpoint.CaCertificate == b64Ca }, "1m", "10s").Should(BeTrue()) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -1962,16 +1964,16 @@ location = "%s" defer func() { deleteSecret(ctx, kubeClient, ignitionTokenSecretName) }() - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostkey) agent.Spec.IgnitionEndpointTokenReference = &v1beta1.IgnitionEndpointTokenReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: ignitionTokenSecretName, } return kubeClient.Update(ctx, agent) @@ -1998,15 +2000,15 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := 
registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } @@ -2065,7 +2067,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } @@ -2101,7 +2103,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } @@ -2141,7 +2143,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } @@ -2156,22 +2158,22 @@ location = "%s" By("Register Agent to InfraEnv") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Verify agent and host are not bound") h, err := common.GetHostFromDB(db, infraEnv.ID.String(), host.ID.String()) Expect(err).To(BeNil()) 
Expect(h.ClusterID).To(BeNil()) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() bool { @@ -2188,7 +2190,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } @@ -2199,19 +2201,19 @@ location = "%s" By("Register Agent to InfraEnv") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Verify agent inventory labels") key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } var agentLabels map[string]string @@ -2224,13 +2226,13 @@ location = "%s" Expect(agentLabels["inventory.agent-install.openshift.io/storage-hasnonrotationaldisk"]).To(Equal("true")) Expect(agentLabels["inventory.agent-install.openshift.io/cpu-architecture"]).To(Equal("x86_64")) Expect(agentLabels["inventory.agent-install.openshift.io/cpu-virtenabled"]).To(Equal("false")) - Expect(agentLabels["inventory.agent-install.openshift.io/host-manufacturer"]).To(Equal(validHwInfo.SystemVendor.Manufacturer)) - 
Expect(agentLabels["inventory.agent-install.openshift.io/host-productname"]).To(Equal(validHwInfo.SystemVendor.ProductName)) - Expect(agentLabels["inventory.agent-install.openshift.io/host-isvirtual"]).To(Equal(strconv.FormatBool(validHwInfo.SystemVendor.Virtual))) + Expect(agentLabels["inventory.agent-install.openshift.io/host-manufacturer"]).To(Equal(utils_test.ValidHwInfo.SystemVendor.Manufacturer)) + Expect(agentLabels["inventory.agent-install.openshift.io/host-productname"]).To(Equal(utils_test.ValidHwInfo.SystemVendor.ProductName)) + Expect(agentLabels["inventory.agent-install.openshift.io/host-isvirtual"]).To(Equal(strconv.FormatBool(utils_test.ValidHwInfo.SystemVendor.Virtual))) By("Verify agent classification labels") classificationXXL := v1beta1.AgentClassification{ - ObjectMeta: metav1.ObjectMeta{Name: "xxl", Namespace: Options.Namespace}, + ObjectMeta: metav1.ObjectMeta{Name: "xxl", Namespace: utils_test.Options.Namespace}, Spec: v1beta1.AgentClassificationSpec{ LabelKey: "size", LabelValue: "xxl", @@ -2255,7 +2257,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } Eventually(func() string { @@ -2281,7 +2283,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } checkInfraEnvCondition(ctx, infraEnvKubeName, v1beta1.ImageCreatedCondition, @@ -2299,7 +2301,7 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, 
waitForReconcileTimeout) @@ -2310,7 +2312,7 @@ location = "%s" infraEnvSpec.CpuArchitecture = "arm64" infraEnvArm := types.NamespacedName{ Name: "infraenv" + randomNameSuffix(), - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } deployInfraEnvCRD(ctx, kubeClient, infraEnvArm.Name, infraEnvSpec) @@ -2325,7 +2327,7 @@ location = "%s" infraEnvSpec.CpuArchitecture = "x86_64" infraEnvX86 := types.NamespacedName{ Name: "infraenv" + randomNameSuffix(), - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } deployInfraEnvCRD(ctx, kubeClient, infraEnvX86.Name, infraEnvSpec) @@ -2340,7 +2342,7 @@ location = "%s" infraEnvSpec.CpuArchitecture = "fake" infraEnvFake := types.NamespacedName{ Name: "infraenv" + randomNameSuffix(), - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } deployInfraEnvCRD(ctx, kubeClient, infraEnvFake.Name, infraEnvSpec) @@ -2353,34 +2355,34 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, *infraEnv.ID, hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, hostname, ips[i]) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, 
host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -2393,7 +2395,7 @@ location = "%s" }, "30s", "10s").Should(BeNil()) } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } By("Verify ClusterDeployment ReadyForInstallation") @@ -2412,21 +2414,21 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, key) agent.Spec.Hostname = "newhostname" agent.Spec.Approved = true - agent.Spec.InstallationDiskID = sdb.ID + agent.Spec.InstallationDiskID = utils_test.Sdb.ID return kubeClient.Update(ctx, agent) }, "30s", 
"10s").Should(BeNil()) @@ -2439,19 +2441,19 @@ location = "%s" h, err := common.GetHostFromDB(db, infraEnv.ID.String(), host.ID.String()) Expect(err).To(BeNil()) return h.InstallationDiskID - }, "2m", "10s").Should(Equal(sdb.ID)) + }, "2m", "10s").Should(Equal(utils_test.Sdb.ID)) Eventually(func() string { return getAgentCRD(ctx, kubeClient, key).Status.InstallationDiskID - }, "2m", "10s").Should(Equal(sdb.ID)) + }, "2m", "10s").Should(Equal(utils_test.Sdb.ID)) Eventually(func() string { return getAgentCRD(ctx, kubeClient, key).Spec.InstallationDiskID - }, "2m", "10s").Should(Equal(sdb.ID)) + }, "2m", "10s").Should(Equal(utils_test.Sdb.ID)) Eventually(func() bool { return conditionsv1.IsStatusConditionTrue(getAgentCRD(ctx, kubeClient, key).Status.Conditions, v1beta1.SpecSyncedCondition) }, "2m", "10s").Should(Equal(true)) Eventually(func() string { return getAgentCRD(ctx, kubeClient, key).Status.Inventory.SystemVendor.Manufacturer - }, "2m", "10s").Should(Equal(validHwInfo.SystemVendor.Manufacturer)) + }, "2m", "10s").Should(Equal(utils_test.ValidHwInfo.SystemVendor.Manufacturer)) }) It("deploy clusterDeployment with agent,bmh and ignition config override", func() { @@ -2459,18 +2461,18 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } bmhKey := 
types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: createBMHCRDNameFromID(host.ID.String()), } @@ -2530,18 +2532,18 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } bmhKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: createBMHCRDNameFromID(host.ID.String()), } @@ -2592,40 +2594,40 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, *infraEnv.ID, 
hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, hostname, ips[i]) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } agent := getAgentCRD(ctx, kubeClient, hostkey) Expect(agent.Status.ValidationsInfo).ToNot(BeNil()) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("Invalid ignition config - invalid json") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } @@ -2655,7 +2657,7 @@ location = "%s" By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -2674,14 +2676,14 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) key := 
types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } @@ -2737,14 +2739,14 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } @@ -2796,18 +2798,18 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } bmhKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: createBMHCRDNameFromID(host.ID.String()), } @@ -2877,12 +2879,12 @@ location = "%s" It("Should populate AdditionalTrustBundle on creation and update of infraenv", func() { infraEnvKey := 
types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, &v1beta1.InfraEnvSpec{ PullSecretRef: secretRef, - SSHAuthorizedKey: sshPublicKey, + SSHAuthorizedKey: utils_test.SshPublicKey, AdditionalTrustBundle: additionalTrustCertificate, }) Eventually(func() string { @@ -2903,11 +2905,11 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) @@ -2927,13 +2929,13 @@ location = "%s" infraEnvSpec.AdditionalNTPSources = []string{"192.168.1.4"} deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } // InfraEnv Reconcile takes longer, since it needs to generate the image. 
checkInfraEnvCondition(ctx, infraEnvKubeName, v1beta1.ImageCreatedCondition, v1beta1.ImageStateCreated) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -2958,7 +2960,7 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) @@ -2969,15 +2971,15 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) @@ -3009,7 +3011,7 @@ location = "%s" }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", - Name: pullSecretName, + Name: utils_test.PullSecretName, }, StringData: data, Type: corev1.SecretTypeDockerConfigJson, @@ -3051,25 +3053,25 @@ location = "%s" Expect(err).To(BeNil()) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } cluster := getClusterFromDB(ctx, kubeClient, db, 
clusterKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) - generateFullMeshConnectivity(ctx, ips[0], host) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) key := types.NamespacedName{ Namespace: "default", Name: host.ID.String(), } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Approve Agent") Eventually(func() error { @@ -3101,12 +3103,12 @@ location = "%s" return true }, "1m", "2s").Should(BeTrue()) - updateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) By("Complete Installation") - completeInstallation(agentBMClient, *cluster.ID) + utils_test.TestContext.CompleteInstallation(*cluster.ID) isSuccess := true - _, err = agentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ ClusterID: *cluster.ID, CompletionParams: &models.CompletionParams{ IsSuccess: &isSuccess, @@ -3123,20 +3125,20 @@ location = "%s" It("deploy infraEnv before clusterDeployment", func() { deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, 
Name: infraNsName.Name, } deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) checkInfraEnvCondition(ctx, infraEnvKubeName, v1beta1.ImageCreatedCondition, v1beta1.ImageStateCreated) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -3153,7 +3155,7 @@ location = "%s" }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", - Name: pullSecretName, + Name: utils_test.PullSecretName, }, StringData: data, Type: corev1.SecretTypeDockerConfigJson, @@ -3185,7 +3187,7 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) @@ -3199,7 +3201,7 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, 
hiveext.ClusterNotReadyReason) @@ -3207,7 +3209,7 @@ location = "%s" infraEnvSpec.IgnitionConfigOverride = badIgnitionConfigOverride deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } checkInfraEnvCondition(ctx, infraEnvKubeName, v1beta1.ImageCreatedCondition, v1beta1.ImageStateFailedToCreate+": error parsing ignition: config is not valid") @@ -3216,11 +3218,11 @@ location = "%s" It("deploy clusterDeployment with schedulable masters", func() { deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } verifySchedulableMastersSetup := func(mode bool) { @@ -3245,11 +3247,11 @@ location = "%s" It("deploy clusterDeployment with hyperthreading configuration", func() { deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } verifyHyperthreadingSetup := func(mode string) { @@ -3294,11 +3296,11 @@ location = "%s" It("deploy clusterDeployment with disk encryption configuration", func() { deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: 
Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } verifyDiskEncryptionConfig := func(enableOn *string, mode *string, tangServers string) { @@ -3346,11 +3348,11 @@ location = "%s" noProxy := "acme.com" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } @@ -3387,11 +3389,11 @@ location = "%s" noProxy := "*" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } @@ -3424,13 +3426,13 @@ location = "%s" } deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } // InfraEnv Reconcile takes longer, since it needs to generate the image. 
checkInfraEnvCondition(ctx, infraEnvKubeName, v1beta1.ImageCreatedCondition, v1beta1.ImageStateCreated) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -3445,11 +3447,11 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) @@ -3473,11 +3475,11 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) @@ -3514,20 +3516,20 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: 
clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) infraEnvSpec.NMStateConfigLabelSelector = metav1.LabelSelector{MatchLabels: map[string]string{NMStateLabelName: NMStateLabelValue}} deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } // InfraEnv Reconcile takes longer, since it needs to generate the image. checkInfraEnvCondition(ctx, infraEnvKubeName, v1beta1.ImageCreatedCondition, v1beta1.ImageStateCreated) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -3544,7 +3546,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } @@ -3559,19 +3561,19 @@ location = "%s" By("Register Agent to InfraEnv") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Verify agent is not bind") hostKey := 
types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() bool { @@ -3589,7 +3591,7 @@ location = "%s" Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostKey) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: "ghostcd", } return kubeClient.Update(ctx, agent) @@ -3616,14 +3618,14 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) infraEnvSpec.NMStateConfigLabelSelector = metav1.LabelSelector{MatchLabels: map[string]string{NMStateLabelName: NMStateLabelValue}} deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } // InfraEnv Reconcile takes longer, since it needs to generate the image. 
@@ -3654,21 +3656,21 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) infraEnvSpec.NMStateConfigLabelSelector = metav1.LabelSelector{MatchLabels: map[string]string{NMStateLabelName: NMStateLabelValue}} deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } // InfraEnv Reconcile takes longer, since it needs to generate the image. checkInfraEnvCondition(ctx, infraEnvKubeName, v1beta1.ImageCreatedCondition, v1beta1.ImageStateCreated) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -3680,7 +3682,7 @@ location = "%s" // Delete NMState config nmstateConfigKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: "nmstate1", } Expect(kubeClient.Delete(ctx, getNMStateConfig(ctx, kubeClient, nmstateConfigKey))).ShouldNot(HaveOccurred()) @@ -3699,34 +3701,34 @@ location = "%s" By("Register host to pool") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = 
[]string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Create cluster") deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) hostKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } By("Bind Agent") Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostKey) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } return kubeClient.Update(ctx, agent) @@ -3742,9 +3744,9 @@ location = "%s" return agent.ClusterID != nil }, "30s", "1s").Should(BeTrue()) - registerHostByUUID(host.InfraEnvID, *host.ID) - generateEssentialHostSteps(ctx, host, "hostname1", defaultCIDRv4) - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.RegisterHostByUUID(host.InfraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, "hostname1", utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Unbind Agent") Eventually(func() error { @@ -3777,34 +3779,34 @@ location = "%s" By("Register host to pool") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: 
utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Create cluster") deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) hostKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } By("Bind Agent") Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostKey) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } return kubeClient.Update(ctx, agent) @@ -3820,9 +3822,9 @@ location = "%s" return agent.ClusterID != nil }, "30s", "1s").Should(BeTrue()) - registerHostByUUID(host.InfraEnvID, *host.ID) - generateEssentialHostSteps(ctx, host, "hostname1", defaultCIDRv4) - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.RegisterHostByUUID(host.InfraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, "hostname1", utils_test.DefaultCIDRv4) + 
utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Delete ClusterDeployment") Expect(kubeClient.Delete(ctx, getClusterDeploymentCRD(ctx, kubeClient, clusterKey))).ShouldNot(HaveOccurred()) @@ -3847,36 +3849,36 @@ location = "%s" By("Register host to pool") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Create source CD") deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) hostKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } By("Bind Agent") Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostKey) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } return 
kubeClient.Update(ctx, agent) @@ -3892,11 +3894,11 @@ location = "%s" return agent.ClusterID != nil }, "30s", "1s").Should(BeTrue()) - registerHostByUUID(host.InfraEnvID, *host.ID) - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") - generateEssentialHostSteps(ctx, host, "hostname1", defaultCIDRv4) - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.RegisterHostByUUID(host.InfraEnvID, *host.ID) + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, "hostname1", utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Create target CD") targetCDSpec := getDefaultClusterDeploymentSpec(secretRef) @@ -3905,7 +3907,7 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, targetAciSNOSpec, targetCDSpec.ClusterInstallRef.Name) targetClusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: targetCDSpec.ClusterName, } getClusterFromDB(ctx, kubeClient, db, targetClusterKey, waitForReconcileTimeout) @@ -3914,7 +3916,7 @@ location = "%s" Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostKey) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: targetCDSpec.ClusterName, } return kubeClient.Update(ctx, agent) @@ -3927,9 +3929,9 @@ location = "%s" }, "1m", "10s").Should(BeTrue()) By("Register to pool again") - registerHostByUUID(host.InfraEnvID, *host.ID) - generateFullMeshConnectivity(ctx, ips[0], host) - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + utils_test.TestContext.RegisterHostByUUID(host.InfraEnvID, 
*host.ID) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Wait for Agent State - binding") Eventually(func() bool { @@ -3937,10 +3939,10 @@ location = "%s" return agent.Status.DebugInfo.State == models.HostStatusBinding }, "1m", "10s").Should(BeTrue()) - registerHostByUUID(host.InfraEnvID, *host.ID) - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") - generateEssentialHostSteps(ctx, host, "hostname1", defaultCIDRv4) - generateDomainResolution(ctx, host, targetCDSpec.ClusterName, "hive.example.com") + utils_test.TestContext.RegisterHostByUUID(host.InfraEnvID, *host.ID) + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, "hostname1", utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, host, targetCDSpec.ClusterName, "hive.example.com") By("Wait for Agent State - Known") Eventually(func() bool { @@ -3971,7 +3973,7 @@ location = "%s" } infraEnvSpec.KernelArguments = kargs infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) @@ -3981,7 +3983,7 @@ location = "%s" setAndCheck := func(kargs []v1beta1.KernelArgument) { infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } @@ -4004,7 +4006,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } dbInfraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -4045,7 +4047,7 @@ location = "%s" func(operation, value string) { infraEnvSpec.ClusterRef = nil infraEnvKey := 
types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) @@ -4070,35 +4072,35 @@ location = "%s" By("Register host to pool") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Create cluster") deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) hostKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } By("Bind Agent") Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostKey) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } return kubeClient.Update(ctx, agent) @@ -4114,10 +4116,10 @@ location = "%s" 
return agent.ClusterID != nil }, "30s", "1s").Should(BeTrue()) - registerHostByUUID(host.InfraEnvID, *host.ID) - generateFullMeshConnectivity(ctx, ips[0], host) - generateEssentialHostSteps(ctx, host, "hostname1", defaultCIDRv4) - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.RegisterHostByUUID(host.InfraEnvID, *host.ID) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, "hostname1", utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Wait for Agent to be Known Bound") Eventually(func() bool { @@ -4152,7 +4154,7 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKubeName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } @@ -4167,21 +4169,21 @@ location = "%s" By("Register Agent to InfraEnv") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := ®isterHost(*infraEnv.ID).Host - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{defaultCIDRv4} - generateFullMeshConnectivity(ctx, ips[0], host) - generateHWPostStepReply(ctx, host, hwInfo, "hostname1") + host := &utils_test.TestContext.RegisterHost(*infraEnv.ID).Host + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) + hwInfo := utils_test.ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{utils_test.DefaultCIDRv4} + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) + 
utils_test.TestContext.GenerateHWPostStepReply(ctx, host, hwInfo, "hostname1") By("Verify agent is not bind") hostKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() bool { @@ -4201,7 +4203,7 @@ location = "%s" By("Check ACI condition ValidationsFailing") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterValidatedCondition, hiveext.ClusterValidationsFailingReason) @@ -4210,7 +4212,7 @@ location = "%s" Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, hostKey) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } return kubeClient.Update(ctx, agent) @@ -4226,10 +4228,10 @@ location = "%s" return agent.ClusterID != nil }, "30s", "1s").Should(BeTrue()) - registerHostByUUID(host.InfraEnvID, *host.ID) - generateEssentialHostSteps(ctx, host, "hostname1", defaultCIDRv4) - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") - generateEssentialPrepareForInstallationSteps(ctx, host) + utils_test.TestContext.RegisterHostByUUID(host.InfraEnvID, *host.ID) + utils_test.TestContext.GenerateEssentialHostSteps(ctx, host, "hostname1", utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, host) By("Check ACI condition UnapprovedAgentsReason") checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterUnapprovedAgentsReason) @@ -4245,7 +4247,7 @@ location = "%s" checkAgentClusterInstallCondition(ctx, installkey, 
hiveext.ClusterCompletedCondition, hiveext.ClusterInstallationInProgressReason) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } Eventually(func() bool { @@ -4260,13 +4262,13 @@ location = "%s" checkAgentCondition(ctx, host.ID.String(), v1beta1.InstalledCondition, v1beta1.InstallationInProgressReason) - updateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) By("Complete Installation") cluster := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) - completeInstallation(agentBMClient, *cluster.ID) + utils_test.TestContext.CompleteInstallation(*cluster.ID) isSuccess := true - _, err := agentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ ClusterID: *cluster.ID, CompletionParams: &models.CompletionParams{ IsSuccess: &isSuccess, @@ -4311,28 +4313,28 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } cluster := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) - generateFullMeshConnectivity(ctx, ips[0], host) + host := 
utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Check that ACI Event URL is valid") Eventually(func() string { aci := getAgentClusterInstallCRD(ctx, kubeClient, installkey) @@ -4432,7 +4434,7 @@ location = "%s" models.HostStageWaitingForBootkube, models.HostStageWritingImageToDisk, models.HostStageRebooting, models.HostStageJoined, models.HostStageDone, } - updateHostProgressWithInfo(*host.ID, *infraEnv.ID, installProgress, installInfo) + utils_test.TestContext.UpdateHostProgressWithInfo(*host.ID, *infraEnv.ID, installProgress, installInfo) Eventually(func() bool { agent := getAgentCRD(ctx, kubeClient, key) @@ -4448,9 +4450,9 @@ location = "%s" }, "30s", "10s").Should(Equal(int64(80))) By("Check ACI Logs URL exists") - kubeconfigFile, err := os.Open("test_kubeconfig") + kubeconfigFile, err := os.Open("../test_kubeconfig") Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, InfraEnvID: infraEnv.ID, HostID: host.ID, LogsType: string(models.LogsTypeHost), Upfile: kubeconfigFile}) Expect(err).NotTo(HaveOccurred()) kubeconfigFile.Close() @@ -4469,24 +4471,24 @@ location = 
"%s" By("Check kubeconfig before install is finished") configSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminKubeconfigSecretRef configkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: configSecretRef.Name, } configSecret := getSecret(ctx, kubeClient, configkey) Expect(configSecret.Data["kubeconfig"]).NotTo(BeNil()) By("Upload cluster logs") - kubeconfigFile, err1 := os.Open("test_kubeconfig") + kubeconfigFile, err1 := os.Open("../test_kubeconfig") Expect(err1).NotTo(HaveOccurred()) - _, err1 = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, + _, err1 = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, InfraEnvID: infraEnv.ID, Upfile: kubeconfigFile, LogsType: string(models.LogsTypeController)}) Expect(err1).NotTo(HaveOccurred()) kubeconfigFile.Close() By("Complete Installation") - completeInstallation(agentBMClient, *cluster.ID) + utils_test.TestContext.CompleteInstallation(*cluster.ID) isSuccess := true - _, err = agentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ + _, err = utils_test.TestContext.AgentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ ClusterID: *cluster.ID, CompletionParams: &models.CompletionParams{ IsSuccess: &isSuccess, @@ -4502,7 +4504,7 @@ location = "%s" passwordSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminPasswordSecretRef Expect(passwordSecretRef).NotTo(BeNil()) passwordkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: passwordSecretRef.Name, } passwordSecret := getSecret(ctx, kubeClient, passwordkey) @@ -4551,19 +4553,19 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, 
clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) - host := registerNode(ctx, *infraEnv.ID, "hostname1", ips[0]) + ips := hostutil.GenerateIPv4Addresses(2, utils_test.DefaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", ips[0]) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } @@ -4589,14 +4591,14 @@ location = "%s" By("Register Agent to new Infraenv") infraEnv2Key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: "infraenv2", } infraEnv2 := getInfraEnvFromDBByKubeKey(ctx, db, infraEnv2Key, waitForReconcileTimeout) configureLocalAgentClient(infraEnv2.ID.String()) - h := ®isterHostByUUID(*infraEnv2.ID, *host.ID).Host - generateEssentialHostSteps(ctx, h, "hostname2", ips[1]) - generateEssentialPrepareForInstallationSteps(ctx, h) + h := &utils_test.TestContext.RegisterHostByUUID(*infraEnv2.ID, *host.ID).Host + utils_test.TestContext.GenerateEssentialHostSteps(ctx, h, "hostname2", ips[1]) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, h) By("Check Agent is updated with new ClusterDeployment") Eventually(func() string { @@ -4629,27 +4631,27 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: 
clusterDeploymentSpec.ClusterName, } infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) - generateFullMeshConnectivity(ctx, ips[0], host) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Approve Agent") Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, key) @@ -4685,12 +4687,12 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } cluster := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) 
@@ -4699,24 +4701,24 @@ location = "%s" for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsPassingReason) } - ips := hostutil.GenerateIPv4Addresses(3, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(3, utils_test.DefaultCIDRv4) for i := 0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, *infraEnv.ID, hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, hostname, ips[i]) hosts = append(hosts, host) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, host := range hosts { - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -4728,7 +4730,7 @@ location = "%s" By("Wait for installing") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterCompletedCondition, hiveext.ClusterInstallationInProgressReason) @@ -4747,13 +4749,13 @@ location = "%s" } for _, host := range hosts { - updateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) } By("Complete Installation") - completeInstallation(agentBMClient, *cluster.ID) + utils_test.TestContext.CompleteInstallation(*cluster.ID) isSuccess := true - _, err 
:= agentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ ClusterID: *cluster.ID, CompletionParams: &models.CompletionParams{ IsSuccess: &isSuccess, @@ -4780,7 +4782,7 @@ location = "%s" passwordSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminPasswordSecretRef Expect(passwordSecretRef).NotTo(BeNil()) passwordkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: passwordSecretRef.Name, } passwordSecret := getSecret(ctx, kubeClient, passwordkey) @@ -4789,7 +4791,7 @@ location = "%s" configSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminKubeconfigSecretRef Expect(passwordSecretRef).NotTo(BeNil()) configkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: configSecretRef.Name, } configSecret := getSecret(ctx, kubeClient, configkey) @@ -4801,31 +4803,31 @@ location = "%s" deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } cluster := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) hosts := make([]*models.Host, 0) - ips := hostutil.GenerateIPv4Addresses(5, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(5, utils_test.DefaultCIDRv4) for i := 
0; i < 3; i++ { hostname := fmt.Sprintf("h%d", i) - host := registerNode(ctx, *infraEnv.ID, hostname, ips[i]) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, hostname, ips[i]) hosts = append(hosts, host) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } By("Check ACI Logs URL is empty") // Should not show the URL since no logs yet to be collected installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } Eventually(func() string { @@ -4835,7 +4837,7 @@ location = "%s" By("Approve Agents") for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } Eventually(func() error { @@ -4859,11 +4861,11 @@ location = "%s" By("Upload hosts logs during installation") for _, host := range hosts { - updateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) + utils_test.TestContext.UpdateProgress(*host.ID, *infraEnv.ID, models.HostStageDone) - kubeconfigFile, err := os.Open("test_kubeconfig") + kubeconfigFile, err := os.Open("../test_kubeconfig") Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, + _, err = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, InfraEnvID: infraEnv.ID, HostID: host.ID, LogsType: string(models.LogsTypeHost), Upfile: kubeconfigFile}) Expect(err).NotTo(HaveOccurred()) kubeconfigFile.Close() @@ -4876,9 +4878,9 @@ location = "%s" }, "30s", "10s").ShouldNot(Equal("")) 
By("Upload cluster logs") - kubeconfigFile, err1 := os.Open("test_kubeconfig") + kubeconfigFile, err1 := os.Open("../test_kubeconfig") Expect(err1).NotTo(HaveOccurred()) - _, err1 = agentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, + _, err1 = utils_test.TestContext.AgentBMClient.Installer.V2UploadLogs(ctx, &installer.V2UploadLogsParams{ClusterID: *cluster.ID, InfraEnvID: infraEnv.ID, Upfile: kubeconfigFile, LogsType: string(models.LogsTypeController)}) Expect(err1).NotTo(HaveOccurred()) kubeconfigFile.Close() @@ -4926,9 +4928,9 @@ location = "%s" }, "30s", "1s").Should(Equal(aciSpec.IngressVIP)) By("Complete Installation") - completeInstallation(agentBMClient, *cluster.ID) + utils_test.TestContext.CompleteInstallation(*cluster.ID) isSuccess := true - _, err := agentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ + _, err := utils_test.TestContext.AgentBMClient.Installer.V2CompleteInstallation(ctx, &installer.V2CompleteInstallationParams{ ClusterID: *cluster.ID, CompletionParams: &models.CompletionParams{ IsSuccess: &isSuccess, @@ -4961,20 +4963,20 @@ location = "%s" By("Add Day 2 host") configureLocalAgentClient(infraEnv.ID.String()) - day2Host1 := registerNode(ctx, *infraEnv.ID, "firsthostnameday2", ips[3]) - generateApiVipPostStepReply(ctx, day2Host1, &cluster.Cluster, true) - generateFullMeshConnectivity(ctx, ips[3], day2Host1) - generateDomainResolution(ctx, day2Host1, clusterDeploymentSpec.ClusterName, "hive.example.com") + day2Host1 := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "firsthostnameday2", ips[3]) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, day2Host1, &cluster.Cluster, true) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[3], day2Host1) + utils_test.TestContext.GenerateDomainResolution(ctx, day2Host1, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Add a second Day 2 host") - day2Host2 := registerNode(ctx, 
*infraEnv.ID, "secondhostnameday2", ips[4]) - generateApiVipPostStepReply(ctx, day2Host2, &cluster.Cluster, true) - generateFullMeshConnectivity(ctx, ips[4], day2Host2) - generateDomainResolution(ctx, day2Host2, clusterDeploymentSpec.ClusterName, "hive.example.com") + day2Host2 := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "secondhostnameday2", ips[4]) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, day2Host2, &cluster.Cluster, true) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[4], day2Host2) + utils_test.TestContext.GenerateDomainResolution(ctx, day2Host2, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Approve Day 2 agents") k1 := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: day2Host1.ID.String(), } Eventually(func() error { @@ -4984,7 +4986,7 @@ location = "%s" }, "30s", "10s").Should(BeNil()) k2 := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: day2Host2.ID.String(), } Eventually(func() error { @@ -5011,7 +5013,7 @@ location = "%s" aciSNOSpec.Networking.MachineNetwork = []hiveext.MachineNetworkEntry{{CIDR: "1.2.3.5/24"}} deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) @@ -5022,7 +5024,7 @@ location = "%s" aciSNOSpec.Networking.MachineNetwork = []hiveext.MachineNetworkEntry{} deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } 
checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) @@ -5033,7 +5035,7 @@ location = "%s" aciSpec.ImageSetRef.Name = "invalid" deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterSpecSyncedCondition, hiveext.ClusterBackendErrorReason) @@ -5042,7 +5044,7 @@ location = "%s" It("deploy clusterDeployment with missing clusterImageSet", func() { // Remove ClusterImageSet that was created in the test setup imageSetKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: aciSpec.ImageSetRef.Name, } Expect(kubeClient.Delete(ctx, getClusterImageSetCRD(ctx, kubeClient, imageSetKey))).ShouldNot(HaveOccurred()) @@ -5050,7 +5052,7 @@ location = "%s" deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterSpecSyncedCondition, hiveext.ClusterBackendErrorReason) @@ -5064,11 +5066,11 @@ location = "%s" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } Eventually(func() bool { @@ -5105,19 
+5107,19 @@ spec: deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) - generateFullMeshConnectivity(ctx, ips[0], host) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Approve Agent") Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, key) @@ -5125,7 +5127,7 @@ spec: return kubeClient.Update(ctx, agent) }, "30s", "10s").Should(BeNil()) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterReadyReason) @@ -5175,19 +5177,19 @@ spec: deployAgentClusterInstallCRD(ctx, kubeClient, aciSNOSpec, clusterDeploymentSpec.ClusterInstallRef.Name) deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: 
utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) - generateFullMeshConnectivity(ctx, ips[0], host) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], host) key := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } - generateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, host, clusterDeploymentSpec.ClusterName, "hive.example.com") By("Approve Agent") Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, key) @@ -5195,7 +5197,7 @@ spec: return kubeClient.Update(ctx, agent) }, "30s", "10s").Should(BeNil()) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterRequirementsMetCondition, hiveext.ClusterReadyReason) @@ -5232,16 +5234,16 @@ spec: deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, 
hiveext.ClusterRequirementsMetCondition, hiveext.ClusterNotReadyReason) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) @@ -5256,7 +5258,7 @@ spec: By("Register a Host and validate that an agent CR was created") configureLocalAgentClient(infraEnv.ID.String()) - registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) Eventually(func() int { return len(getClusterDeploymentAgents(ctx, kubeClient, clusterKey).Items) }, "2m", "2s").Should(Equal(1)) @@ -5293,7 +5295,7 @@ spec: deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } @@ -5318,11 +5320,11 @@ spec: deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } cluster := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) @@ -5352,7 +5354,7 @@ spec: deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } By("Verify InputError") @@ 
-5375,7 +5377,7 @@ spec: By("check ACI conditions") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterSpecSyncedCondition, hiveext.ClusterSyncedOkReason) @@ -5396,7 +5398,7 @@ spec: By("check ACI conditions") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterSpecSyncedCondition, hiveext.ClusterSyncedOkReason) @@ -5417,7 +5419,7 @@ spec: By("check ACI conditions") installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } checkAgentClusterInstallCondition(ctx, installkey, hiveext.ClusterSpecSyncedCondition, hiveext.ClusterSyncedOkReason) @@ -5434,7 +5436,7 @@ spec: deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } Eventually(func() bool { @@ -5455,7 +5457,7 @@ spec: deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) installkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterInstallRef.Name, } Eventually(func() bool { @@ -5517,7 +5519,7 @@ spec: aciSpec.Networking.NetworkType = models.ClusterCreateParamsNetworkTypeOVNKubernetes deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec, clusterDeploymentSpec.ClusterInstallRef.Name) clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: 
clusterDeploymentSpec.ClusterName, } Eventually(func() *common.Cluster { @@ -5527,7 +5529,7 @@ spec: By("Deploy infraenv") infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) @@ -5541,28 +5543,28 @@ spec: Expect(infraEnv).ToNot(BeNil()) configureLocalAgentClient(infraEnv.ID.String()) - ips := hostutil.GenerateIPv4Addresses(5, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(5, utils_test.DefaultCIDRv4) hosts := make([]*models.Host, 0) for i := 0; i < 5; i++ { hostname := fmt.Sprintf("h%d", i) - hosts = append(hosts, registerNode(ctx, *infraEnv.ID, hostname, ips[i])) + hosts = append(hosts, utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, hostname, ips[i])) } for _, host := range hosts { checkAgentCondition(ctx, host.ID.String(), v1beta1.ValidatedCondition, v1beta1.ValidationsFailingReason) hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } agent := getAgentCRD(ctx, kubeClient, hostkey) Expect(agent.Status.ValidationsInfo).ToNot(BeNil()) } - generateFullMeshConnectivity(ctx, ips[0], hosts...) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], hosts...) 
for _, h := range hosts { - generateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, clusterDeploymentSpec.ClusterName, "hive.example.com") } // approving the agents so the infraenv could be deregistered successfully @@ -5572,7 +5574,7 @@ spec: for _, host := range hosts { hostkey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } @@ -5609,7 +5611,7 @@ spec: }) }) -var _ = Describe("bmac reconcile flow", func() { +var _ = PDescribe("bmac reconcile flow", func() { // Disabled until MGMT-19596 is resolved if !Options.EnableKubeAPI { return } @@ -5630,27 +5632,27 @@ var _ = Describe("bmac reconcile flow", func() { infraNsName = types.NamespacedName{ Name: "infraenv", - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } infraEnvSpec := getDefaultInfraEnvSpec(secretRef, clusterDeploymentSpec) deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) agentNsName = types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } bmhSpec := metal3_v1alpha1.BareMetalHostSpec{} deployBMHCRD(ctx, kubeClient, host.ID.String(), &bmhSpec) bmhNsName = types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: createBMHCRDNameFromID(host.ID.String()), } }) @@ -5730,7 +5732,7 @@ var _ = Describe("bmac reconcile flow", func() { }) }) -var _ = 
Describe("PreprovisioningImage reconcile flow", func() { +var _ = PDescribe("PreprovisioningImage reconcile flow", func() { // Disabled until MGMT-19596 is resolved if !Options.EnableKubeAPI { return } @@ -5739,7 +5741,7 @@ var _ = Describe("PreprovisioningImage reconcile flow", func() { It("will correctly set the image url after an invalid infraenv is corrected", func() { infraNsName := types.NamespacedName{ Name: "infraenv", - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } infraEnv := &v1beta1.InfraEnv{ TypeMeta: metav1.TypeMeta{ @@ -5747,7 +5749,7 @@ var _ = Describe("PreprovisioningImage reconcile flow", func() { APIVersion: getAPIVersion(), }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, Annotations: map[string]string{controllers.EnableIronicAgentAnnotation: "true"}, }, @@ -5759,7 +5761,7 @@ var _ = Describe("PreprovisioningImage reconcile flow", func() { Expect(kubeClient.Create(ctx, infraEnv)).To(Succeed()) ppiNsName := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: "test-image", } ppi := &metal3_v1alpha1.PreprovisioningImage{ @@ -5768,7 +5770,7 @@ var _ = Describe("PreprovisioningImage reconcile flow", func() { APIVersion: "metal3.io/v1alpha1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: ppiNsName.Name, Labels: map[string]string{controllers.BMH_INFRA_ENV_LABEL: infraNsName.Name}, }, @@ -5795,7 +5797,7 @@ var _ = Describe("PreprovisioningImage reconcile flow", func() { // correct public key Eventually(func() error { infraEnv := getInfraEnvCRD(ctx, kubeClient, infraNsName) - infraEnv.Spec.SSHAuthorizedKey = sshPublicKey + infraEnv.Spec.SSHAuthorizedKey = utils_test.SshPublicKey return kubeClient.Update(ctx, infraEnv) }, "30s", "5s").Should(Succeed()) @@ -5832,23 +5834,23 @@ var _ = Describe("PreprovisioningImage reconcile 
flow", func() { infraNsName = types.NamespacedName{ Name: "infraenv", - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } infraEnvSpec := getDefaultInfraEnvSpec(secretRef, clusterDeploymentSpec) deployInfraEnvCRD(ctx, kubeClient, infraNsName.Name, infraEnvSpec) infraEnvKey = types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: infraNsName.Name, } infraEnv := getInfraEnvFromDBByKubeKey(ctx, db, infraEnvKey, waitForReconcileTimeout) configureLocalAgentClient(infraEnv.ID.String()) - host := registerNode(ctx, *infraEnv.ID, "hostname1", defaultCIDRv4) + host := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "hostname1", utils_test.DefaultCIDRv4) ppiSpec := metal3_v1alpha1.PreprovisioningImageSpec{AcceptFormats: []metal3_v1alpha1.ImageFormat{metal3_v1alpha1.ImageFormatISO}} deployPPICRD(ctx, kubeClient, host.ID.String(), &ppiSpec) ppiNsName = types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: host.ID.String(), } }) @@ -5920,7 +5922,7 @@ var _ = Describe("PreprovisioningImage reconcile flow", func() { }) }) -var _ = Describe("restore Host by Agent flow", func() { +var _ = PDescribe("restore Host by Agent flow", func() { // Disabled until MGMT-19596 is resolved if !Options.EnableKubeAPI { return } @@ -5947,16 +5949,16 @@ var _ = Describe("restore Host by Agent flow", func() { deployClusterImageSetCRD(ctx, kubeClient, aciSNOSpec.ImageSetRef) clusterNsName = types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } infraNsName = types.NamespacedName{ Name: "infraenv" + randomNameSuffix(), - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } agentNsName = types.NamespacedName{ Name: "agent" + randomNameSuffix(), - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, } }) @@ -6034,7 +6036,7 @@ var _ = Describe("restore 
Host by Agent flow", func() { Eventually(func() error { agent := getAgentCRD(ctx, kubeClient, agentNsName) agent.Spec.ClusterDeploymentName = &v1beta1.ClusterReference{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } return kubeClient.Update(ctx, agent) @@ -6078,7 +6080,7 @@ var _ = Describe("restore Host by Agent flow", func() { By("Verify cluster is in 'adding-hosts' status") clusterKey := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } cluster := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) @@ -6086,17 +6088,17 @@ var _ = Describe("restore Host by Agent flow", func() { Expect(*cluster.Status).Should(Equal(models.ClusterStatusAddingHosts)) By("Add Day 2 host") - ips := hostutil.GenerateIPv4Addresses(1, defaultCIDRv4) + ips := hostutil.GenerateIPv4Addresses(1, utils_test.DefaultCIDRv4) configureLocalAgentClient(infraEnv.ID.String()) - day2Host1 := registerNode(ctx, *infraEnv.ID, "firsthostnameday2", ips[0]) - generateApiVipPostStepReply(ctx, day2Host1, &cluster.Cluster, true) - generateFullMeshConnectivity(ctx, ips[0], day2Host1) - generateDomainResolution(ctx, day2Host1, clusterDeploymentSpec.ClusterName, "hive.example.com") - generateCommonDomainReply(ctx, day2Host1, clusterDeploymentSpec.ClusterName, clusterDeploymentSpec.BaseDomain) + day2Host1 := utils_test.TestContext.RegisterNode(ctx, *infraEnv.ID, "firsthostnameday2", ips[0]) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, day2Host1, &cluster.Cluster, true) + utils_test.TestContext.GenerateFullMeshConnectivity(ctx, ips[0], day2Host1) + utils_test.TestContext.GenerateDomainResolution(ctx, day2Host1, clusterDeploymentSpec.ClusterName, "hive.example.com") + utils_test.TestContext.GenerateCommonDomainReply(ctx, day2Host1, clusterDeploymentSpec.ClusterName, clusterDeploymentSpec.BaseDomain) By("Approve Day 2 
agents") k1 := types.NamespacedName{ - Namespace: Options.Namespace, + Namespace: utils_test.Options.Namespace, Name: day2Host1.ID.String(), } Eventually(func() error { diff --git a/subsystem/manifests_test.go b/subsystem/manifests_test.go index b3828aa136d..902b7d9a7f8 100644 --- a/subsystem/manifests_test.go +++ b/subsystem/manifests_test.go @@ -18,6 +18,7 @@ import ( "github.com/openshift/assisted-service/internal/constants" "github.com/openshift/assisted-service/internal/usage" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("manifests tests", func() { @@ -62,12 +63,12 @@ spec: ManifestSource: constants.ManifestSourceUserSupplied, } - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, BaseDNSDomain: "example.com", }, }) @@ -80,7 +81,7 @@ spec: var originalFilesAmount int By("List files before upload", func() { - response, err := userBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ ClusterID: *cluster.ID, }) Expect(err).ShouldNot(HaveOccurred()) @@ -88,7 +89,7 @@ spec: }) By("upload", func() { - response, err := userBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ ClusterID: *cluster.ID, CreateManifestParams: 
&models.CreateManifestParams{ Content: &base64Content, @@ -102,7 +103,7 @@ spec: }) By("List files after upload", func() { - response, err := userBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ ClusterID: *cluster.ID, }) @@ -123,7 +124,7 @@ spec: By("download", func() { buffer := new(bytes.Buffer) - _, err := userBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ ClusterID: *cluster.ID, FileName: manifestFile.FileName, Folder: &manifestFile.Folder, @@ -133,7 +134,7 @@ spec: }) By("update only content without rename", func() { - _, err := userBMClient.Manifests.V2UpdateClusterManifest(ctx, &manifests.V2UpdateClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2UpdateClusterManifest(ctx, &manifests.V2UpdateClusterManifestParams{ ClusterID: *cluster.ID, UpdateManifestParams: &models.UpdateManifestParams{ UpdatedContent: &base64UpdateContent, @@ -145,7 +146,7 @@ spec: }) By("List files after update", func() { - response, err := userBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ ClusterID: *cluster.ID, }) @@ -166,7 +167,7 @@ spec: By("download after update", func() { buffer := new(bytes.Buffer) - _, err := userBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ ClusterID: *cluster.ID, FileName: manifestFile.FileName, Folder: &manifestFile.Folder, @@ 
-176,7 +177,7 @@ spec: }) By("rename manifest", func() { - response, err := userBMClient.Manifests.V2UpdateClusterManifest(ctx, &manifests.V2UpdateClusterManifestParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2UpdateClusterManifest(ctx, &manifests.V2UpdateClusterManifestParams{ ClusterID: *cluster.ID, UpdateManifestParams: &models.UpdateManifestParams{ FileName: manifestFile.FileName, @@ -192,7 +193,7 @@ spec: By("delete", func() { fmt.Print("\nDelete\n") - _, err := userBMClient.Manifests.V2DeleteClusterManifest(ctx, &manifests.V2DeleteClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2DeleteClusterManifest(ctx, &manifests.V2DeleteClusterManifestParams{ ClusterID: *cluster.ID, FileName: renamedManifestFile.FileName, Folder: &renamedManifestFile.Folder, @@ -202,7 +203,7 @@ spec: By("List files after delete", func() { fmt.Print("\nList files after delete\n") - response, err := userBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ ClusterID: *cluster.ID, }) Expect(err).ShouldNot(HaveOccurred()) @@ -224,7 +225,7 @@ spec: var non_exiting_id = strfmt.UUID(uuid.New().String()) It("create manifest returns not found", func() { - _, err := userBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2CreateClusterManifest(ctx, &manifests.V2CreateClusterManifestParams{ ClusterID: non_exiting_id, CreateManifestParams: &models.CreateManifestParams{ Content: &base64Content, @@ -236,7 +237,7 @@ spec: }) It("update manifest returns not found", func() { - _, err := userBMClient.Manifests.V2UpdateClusterManifest(ctx, &manifests.V2UpdateClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2UpdateClusterManifest(ctx, 
&manifests.V2UpdateClusterManifestParams{ ClusterID: non_exiting_id, UpdateManifestParams: &models.UpdateManifestParams{ UpdatedContent: &base64Content, @@ -248,7 +249,7 @@ spec: }) It("list manifests returns not found", func() { - _, err := userBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ ClusterID: non_exiting_id, }) Expect(err).To(BeAssignableToTypeOf(manifests.NewV2ListClusterManifestsNotFound())) @@ -258,7 +259,7 @@ spec: It("download manifests returns not found", func() { buffer := new(bytes.Buffer) - _, err := userBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ ClusterID: non_exiting_id, FileName: manifestFile.FileName, Folder: &manifestFile.Folder, @@ -267,7 +268,7 @@ spec: }) It("delete manifests returns not found", func() { - _, err := userBMClient.Manifests.V2DeleteClusterManifest(ctx, &manifests.V2DeleteClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2DeleteClusterManifest(ctx, &manifests.V2DeleteClusterManifestParams{ ClusterID: non_exiting_id, FileName: manifestFile.FileName, Folder: &manifestFile.Folder, @@ -291,16 +292,16 @@ spec: By("install cluster", func() { registerHostsAndSetRoles(clusterID, *infraEnvID, minHosts, "test-cluster", "example.com") - reply, err := userBMClient.Installer.V2InstallCluster(context.Background(), &installer.V2InstallClusterParams{ClusterID: clusterID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(context.Background(), &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := reply.GetPayload() Expect(*c.Status).Should(Equal(models.ClusterStatusPreparingForInstallation)) - 
generateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) - waitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusSuccess) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) + utils_test.TestContext.WaitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusSuccess) }) By("list manifests", func() { - response, err := userBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ + response, err := utils_test.TestContext.UserBMClient.Manifests.V2ListClusterManifests(ctx, &manifests.V2ListClusterManifestsParams{ ClusterID: clusterID, }) Expect(err).ShouldNot(HaveOccurred()) @@ -330,12 +331,12 @@ var _ = Describe("disk encryption", func() { By("cluster creation", func() { - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, BaseDNSDomain: "example.com", DiskEncryption: &models.DiskEncryption{ EnableOn: swag.String(models.DiskEncryptionEnableOnAll), @@ -354,7 +355,7 @@ var _ = Describe("disk encryption", func() { By("cluster update", func() { - updateClusterReply, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + updateClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ DiskEncryption: &models.DiskEncryption{ EnableOn: swag.String(models.DiskEncryptionEnableOnMasters), @@ -568,12 +569,12 @@ spec: By("register cluster", func() { - registerClusterReply, 
err := userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, BaseDNSDomain: "example.com", DiskEncryption: t.diskEncryption, }, @@ -585,11 +586,11 @@ spec: By("install cluster", func() { infraEnvID := registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID registerHostsAndSetRoles(clusterID, *infraEnvID, minHosts, "test-cluster", "example.com") - reply, err := userBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) + reply, err := utils_test.TestContext.UserBMClient.Installer.V2InstallCluster(ctx, &installer.V2InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := reply.GetPayload() - generateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) - waitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusSuccess) + utils_test.TestContext.GenerateEssentialPrepareForInstallationSteps(ctx, c.Hosts...) 
+ utils_test.TestContext.WaitForLastInstallationCompletionStatus(clusterID, models.LastInstallationPreparationStatusSuccess) }) By("verify manifests", func() { @@ -597,7 +598,7 @@ spec: for i, manifestName := range t.expectedManifestsNames { manifest := &bytes.Buffer{} - _, err := userBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ + _, err := utils_test.TestContext.UserBMClient.Manifests.V2DownloadClusterManifest(ctx, &manifests.V2DownloadClusterManifestParams{ ClusterID: clusterID, FileName: manifestName, Folder: &openshiftFolder, @@ -617,12 +618,12 @@ spec: }) func verifyUsage(set bool, clusterID strfmt.UUID) { - getReply, err := userBMClient.Installer.V2GetCluster(context.TODO(), installer.NewV2GetClusterParams().WithClusterID(clusterID)) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(context.TODO(), installer.NewV2GetClusterParams().WithClusterID(clusterID)) Expect(err).ToNot(HaveOccurred()) c := &common.Cluster{Cluster: *getReply.Payload} if set { - verifyUsageSet(c.FeatureUsage, models.Usage{Name: usage.CustomManifest}) + utils_test.VerifyUsageSet(c.FeatureUsage, models.Usage{Name: usage.CustomManifest}) } else { - verifyUsageNotSet(c.FeatureUsage, usage.CustomManifest) + utils_test.VerifyUsageNotSet(c.FeatureUsage, usage.CustomManifest) } } diff --git a/subsystem/metrics_test.go b/subsystem/metrics_test.go index 80d14e93649..ada01d9a940 100644 --- a/subsystem/metrics_test.go +++ b/subsystem/metrics_test.go @@ -23,6 +23,7 @@ import ( "github.com/openshift/assisted-service/internal/host/hostutil" "github.com/openshift/assisted-service/models" "k8s.io/apimachinery/pkg/util/wait" + "github.com/openshift/assisted-service/subsystem/utils_test" ) const ( @@ -56,7 +57,7 @@ type clusterValidationResult struct { func isHostValidationInStatus(clusterID, infraEnvID, hostID strfmt.UUID, validationID models.HostValidationID, expectedStatus string) (bool, error) { var validationRes 
map[string][]hostValidationResult - h := getHostV2(infraEnvID, hostID) + h := utils_test.TestContext.GetHostV2(infraEnvID, hostID) if h.ValidationsInfo == "" { return false, nil } @@ -75,7 +76,7 @@ func isHostValidationInStatus(clusterID, infraEnvID, hostID strfmt.UUID, validat func isClusterValidationInStatus(clusterID strfmt.UUID, validationID models.ClusterValidationID, expectedStatus string) (bool, error) { var validationRes map[string][]clusterValidationResult - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) if c.ValidationsInfo == "" { return false, nil } @@ -181,7 +182,7 @@ func getMetricRecord(name string) (string, error) { } func getMetricEvents(ctx context.Context, clusterID strfmt.UUID) []*models.Event { - eventsReply, err := userBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ + eventsReply, err := utils_test.TestContext.UserBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ ClusterID: &clusterID, Categories: []string{"metrics"}, }) @@ -201,7 +202,7 @@ func filterMetricEvents(in []*models.Event, hostID strfmt.UUID, message string) func assertHostValidationEvent(ctx context.Context, clusterID strfmt.UUID, hostName string, validationID models.HostValidationID, isFailure bool) { - eventsReply, err := userBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ + eventsReply, err := utils_test.TestContext.UserBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ ClusterID: &clusterID, }) Expect(err).NotTo(HaveOccurred()) @@ -223,7 +224,7 @@ func assertHostValidationEvent(ctx context.Context, clusterID strfmt.UUID, hostN func assertClusterValidationEvent(ctx context.Context, clusterID strfmt.UUID, validationID models.ClusterValidationID, isFailure bool) { - eventsReply, err := userBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ + eventsReply, err := utils_test.TestContext.UserBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ ClusterID: &clusterID, }) 
Expect(err).NotTo(HaveOccurred()) @@ -245,7 +246,7 @@ func assertClusterValidationEvent(ctx context.Context, clusterID strfmt.UUID, va func assertNoValidationEvent(ctx context.Context, clusterID strfmt.UUID, hostName string, validationID models.HostValidationID) { - eventsReply, err := userBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ + eventsReply, err := utils_test.TestContext.UserBMClient.Events.V2ListEvents(ctx, &events.V2ListEventsParams{ ClusterID: &clusterID, }) Expect(err).NotTo(HaveOccurred()) @@ -263,7 +264,7 @@ func assertNoValidationEvent(ctx context.Context, clusterID strfmt.UUID, hostNam func v2RegisterDay2Cluster(ctx context.Context) strfmt.UUID { openshiftClusterID := strfmt.UUID(uuid.New().String()) - c, err := userBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ + c, err := utils_test.TestContext.UserBMClient.Installer.V2ImportCluster(ctx, &installer.V2ImportClusterParams{ NewImportClusterParams: &models.ImportClusterParams{ Name: swag.String("test-metrics-day2-cluster"), OpenshiftVersion: openshiftVersion, @@ -274,7 +275,7 @@ func v2RegisterDay2Cluster(ctx context.Context) strfmt.UUID { Expect(err).NotTo(HaveOccurred()) clusterID := *c.GetPayload().ID - _, err = userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ PullSecret: swag.String(pullSecret), }, @@ -287,7 +288,7 @@ func v2RegisterDay2Cluster(ctx context.Context) strfmt.UUID { func metricsDeregisterCluster(ctx context.Context, clusterID strfmt.UUID) { - _, err := userBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2DeregisterCluster(ctx, &installer.V2DeregisterClusterParams{ ClusterID: clusterID, }) Expect(err).NotTo(HaveOccurred()) @@ -322,7 +323,7 @@ var _ = 
Describe("Metrics tests", func() { BeforeEach(func() { var err error - clusterID, err = registerCluster(ctx, userBMClient, "test-cluster", pullSecret) + clusterID, err = utils_test.TestContext.RegisterCluster(ctx, utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).NotTo(HaveOccurred()) infraEnvID = registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID }) @@ -342,7 +343,7 @@ var _ = Describe("Metrics tests", func() { registerHostsAndSetRoles(clusterID, *infraEnvID, 3, "test-cluster", "example.com") c = installCluster(clusterID) for _, host := range c.Hosts { - waitForHostState(ctx, "installing", defaultWaitForHostStateTimeout, host) + waitForHostState(ctx, "installing", utils_test.DefaultWaitForHostStateTimeout, host) if host.Bootstrap { bootstrap = *host } @@ -366,7 +367,7 @@ var _ = Describe("Metrics tests", func() { t := tests[i] It(t.name, func() { //move the bootstrap host to the desired state - updateProgress(*bootstrap.ID, *infraEnvID, t.dstStage) + utils_test.TestContext.UpdateProgress(*bootstrap.ID, *infraEnvID, t.dstStage) //read metrics events evs := getMetricEvents(context.TODO(), clusterID) @@ -405,7 +406,7 @@ var _ = Describe("Metrics tests", func() { It("'connected' failed before reboot", func() { // create a validation success - h := &registerHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDConnected) oldChangedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDConnected), hostValidationChangedMetric) @@ -429,7 +430,7 @@ var _ = Describe("Metrics tests", func() { It("'connected' failed after reboot", func() { // create a validation success - h := &registerHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDConnected) // create a validation failure @@
-450,7 +451,7 @@ var _ = Describe("Metrics tests", func() { It("'connected' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host checkedInAt := time.Now().Add(-2 * MaxHostDisconnectionTime) err := db.Model(h).UpdateColumns(&models.Host{CheckedInAt: strfmt.DateTime(checkedInAt)}).Error Expect(err).NotTo(HaveOccurred()) @@ -474,7 +475,7 @@ var _ = Describe("Metrics tests", func() { oldFailedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDHasInventory), hostValidationFailedMetric) // create a validation failure - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDHasInventory) // check generated metrics @@ -485,11 +486,11 @@ var _ = Describe("Metrics tests", func() { It("'has-inventory' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDHasInventory) // create a validation success - generateHWPostStepReply(ctx, h, validHwInfo, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, validHwInfo, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDHasInventory) // check generated events @@ -499,7 +500,7 @@ var _ = Describe("Metrics tests", func() { It("'has-min-hw-capacity' failed", func() { // create a validation success - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host err := db.Model(h).UpdateColumns(&models.Host{Inventory: generateValidInventory(), Status: &hostStatusInsufficient}).Error Expect(err).NotTo(HaveOccurred()) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", @@ -530,7 +531,7 @@ 
var _ = Describe("Metrics tests", func() { Interfaces: []*models.Interface{{IPV4Addresses: []string{"1.2.3.4/24"}}}, Routes: common.TestDefaultRouteConfiguration, } - generateHWPostStepReply(ctx, h, nonValidInventory, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, nonValidInventory, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDHasMinCPUCores, models.HostValidationIDHasMinMemory, @@ -563,7 +564,7 @@ var _ = Describe("Metrics tests", func() { It("'has-min-hw-capacity' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host nonValidInventory := &models.Inventory{ CPU: &models.CPU{Count: 1}, Memory: &models.Memory{PhysicalBytes: int64(4 * units.GiB), UsableBytes: int64(4 * units.GiB)}, @@ -572,7 +573,7 @@ var _ = Describe("Metrics tests", func() { Interfaces: []*models.Interface{{IPV4Addresses: []string{"1.2.3.4/24"}}}, Routes: common.TestDefaultRouteConfiguration, } - generateHWPostStepReply(ctx, h, nonValidInventory, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, nonValidInventory, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDHasMinCPUCores, models.HostValidationIDHasMinMemory, @@ -581,7 +582,7 @@ var _ = Describe("Metrics tests", func() { models.HostValidationIDHasMemoryForRole) // create a validation success - generateHWPostStepReply(ctx, h, validHwInfo, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, validHwInfo, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDHasMinCPUCores, models.HostValidationIDHasMinMemory, @@ -606,7 +607,7 @@ var _ = Describe("Metrics tests", func() { oldFailedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDMachineCidrDefined), hostValidationFailedMetric) // create a validation failure 
- h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDMachineCidrDefined) // check generated metrics @@ -617,11 +618,11 @@ var _ = Describe("Metrics tests", func() { It("'machine-cidr-defined' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDMachineCidrDefined) // create a validation success - generateHWPostStepReply(ctx, h, validHwInfo, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, validHwInfo, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDMachineCidrDefined) // check generated events @@ -631,10 +632,10 @@ var _ = Describe("Metrics tests", func() { It("'hostname-unique' failed", func() { // create a validation success - h1 := ®isterHost(*infraEnvID).Host - h2 := ®isterHost(*infraEnvID).Host - generateHWPostStepReply(ctx, h1, validHwInfo, "master-0") - generateHWPostStepReply(ctx, h2, validHwInfo, "master-1") + h1 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + h2 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, validHwInfo, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h2, validHwInfo, "master-1") waitForHostValidationStatus(clusterID, *infraEnvID, *h1.ID, "success", models.HostValidationIDHostnameUnique) waitForHostValidationStatus(clusterID, *infraEnvID, *h2.ID, "success", models.HostValidationIDHostnameUnique) @@ -642,8 +643,8 @@ var _ = Describe("Metrics tests", func() { oldFailedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDHostnameUnique), hostValidationFailedMetric) // create a validation failure - generateHWPostStepReply(ctx, 
h1, validHwInfo, "nonUniqName") - generateHWPostStepReply(ctx, h2, validHwInfo, "nonUniqName") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, validHwInfo, "nonUniqName") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h2, validHwInfo, "nonUniqName") waitForHostValidationStatus(clusterID, *infraEnvID, *h1.ID, "failure", models.HostValidationIDHostnameUnique) waitForHostValidationStatus(clusterID, *infraEnvID, *h2.ID, "failure", models.HostValidationIDHostnameUnique) @@ -660,15 +661,15 @@ var _ = Describe("Metrics tests", func() { It("'hostname-unique' got fixed", func() { // create a validation failure - h1 := ®isterHost(*infraEnvID).Host - h2 := ®isterHost(*infraEnvID).Host - generateHWPostStepReply(ctx, h1, validHwInfo, "master-0") - generateHWPostStepReply(ctx, h2, validHwInfo, "master-0") + h1 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + h2 := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, validHwInfo, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h2, validHwInfo, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h1.ID, "failure", models.HostValidationIDHostnameUnique) waitForHostValidationStatus(clusterID, *infraEnvID, *h2.ID, "failure", models.HostValidationIDHostnameUnique) // create a validation success - generateHWPostStepReply(ctx, h2, validHwInfo, "master-1") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h2, validHwInfo, "master-1") waitForHostValidationStatus(clusterID, *infraEnvID, *h1.ID, "success", models.HostValidationIDHostnameUnique) waitForHostValidationStatus(clusterID, *infraEnvID, *h2.ID, "success", models.HostValidationIDHostnameUnique) @@ -680,8 +681,8 @@ var _ = Describe("Metrics tests", func() { It("'hostname-valid' failed", func() { // create a validation success - h := ®isterHost(*infraEnvID).Host - generateHWPostStepReply(ctx, h, validHwInfo, "master-0") + h := 
&utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, validHwInfo, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDHostnameValid) oldChangedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDHostnameValid), hostValidationChangedMetric) @@ -689,7 +690,7 @@ var _ = Describe("Metrics tests", func() { // create a validation failure // 'localhost' is a forbidden host name - generateHWPostStepReply(ctx, h, validHwInfo, "localhost") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, validHwInfo, "localhost") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDHostnameValid) // check generated events @@ -704,13 +705,13 @@ var _ = Describe("Metrics tests", func() { It("'hostname-valid' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host // 'localhost' is a forbidden host name - generateHWPostStepReply(ctx, h, validHwInfo, "localhost") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, validHwInfo, "localhost") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDHostnameValid) // create a validation success - generateHWPostStepReply(ctx, h, validHwInfo, "master-0") + utils_test.TestContext.GenerateHWPostStepReply(ctx, h, validHwInfo, "master-0") waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDHostnameValid) // check generated events @@ -720,7 +721,7 @@ var _ = Describe("Metrics tests", func() { It("'belongs-to-machine-cidr' failed", func() { // create a validation success - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host err := db.Model(h).UpdateColumns(&models.Host{Inventory: generateValidInventoryWithInterface("1.2.3.4/24")}).Error 
Expect(err).NotTo(HaveOccurred()) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDBelongsToMachineCidr) @@ -746,7 +747,7 @@ var _ = Describe("Metrics tests", func() { It("'belongs-to-machine-cidr' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host err := db.Model(h).UpdateColumns(&models.Host{Inventory: generateValidInventoryWithInterface("1.2.3.4/24")}).Error Expect(err).NotTo(HaveOccurred()) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDBelongsToMachineCidr) @@ -770,15 +771,15 @@ var _ = Describe("Metrics tests", func() { day2InfraEnvID := registerInfraEnv(&day2ClusterID, models.ImageTypeMinimalIso).ID // create a validation success - h := registerNode(ctx, *day2InfraEnvID, "master-0", defaultCIDRv4) - generateApiVipPostStepReply(ctx, h, nil, true) + h := utils_test.TestContext.RegisterNode(ctx, *day2InfraEnvID, "master-0", defaultCIDRv4) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, nil, true) waitForHostValidationStatus(day2ClusterID, *day2InfraEnvID, *h.ID, "success", models.HostValidationIDIgnitionDownloadable) oldChangedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDIgnitionDownloadable), hostValidationChangedMetric) oldFailedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDIgnitionDownloadable), hostValidationFailedMetric) // create a validation failure - generateApiVipPostStepReply(ctx, h, nil, false) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, nil, false) waitForHostValidationStatus(day2ClusterID, *day2InfraEnvID, *h.ID, "failure", models.HostValidationIDIgnitionDownloadable) // check generated events @@ -796,12 +797,12 @@ var _ = Describe("Metrics tests", func() { day2InfraEnvID := registerInfraEnv(&day2ClusterID, models.ImageTypeMinimalIso).ID // create a validation failure - h := 
registerNode(ctx, *day2InfraEnvID, "master-0", defaultCIDRv4) - generateApiVipPostStepReply(ctx, h, nil, false) + h := utils_test.TestContext.RegisterNode(ctx, *day2InfraEnvID, "master-0", defaultCIDRv4) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, nil, false) waitForHostValidationStatus(day2ClusterID, *day2InfraEnvID, *h.ID, "failure", models.HostValidationIDIgnitionDownloadable) // create a validation success - generateApiVipPostStepReply(ctx, h, nil, true) + utils_test.TestContext.GenerateApiVipPostStepReply(ctx, h, nil, true) waitForHostValidationStatus(day2ClusterID, *day2InfraEnvID, *h.ID, "success", models.HostValidationIDIgnitionDownloadable) // check generated events @@ -812,10 +813,10 @@ var _ = Describe("Metrics tests", func() { ips := hostutil.GenerateIPv4Addresses(4, defaultCIDRv4) // create a validation success - h1 := registerNode(ctx, *infraEnvID, "h1", ips[0]) - h2 := registerNode(ctx, *infraEnvID, "h2", ips[1]) - h3 := registerNode(ctx, *infraEnvID, "h3", ips[2]) - h4 := registerNode(ctx, *infraEnvID, "h4", ips[3]) + h1 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h1", ips[0]) + h2 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h2", ips[1]) + h3 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h3", ips[2]) + h4 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h4", ips[3]) generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3, h4) waitForHostValidationStatus(clusterID, *infraEnvID, *h1.ID, "success", models.HostValidationIDBelongsToMajorityGroup) @@ -842,10 +843,10 @@ var _ = Describe("Metrics tests", func() { It("'belongs-to-majority-group' got fixed", func() { ips := hostutil.GenerateIPv4Addresses(4, defaultCIDRv4) // create a validation failure - h1 := registerNode(ctx, *infraEnvID, "h1", ips[0]) - h2 := registerNode(ctx, *infraEnvID, "h2", ips[1]) - h3 := registerNode(ctx, *infraEnvID, "h3", ips[2]) - h4 := registerNode(ctx, *infraEnvID, "h4", ips[3]) + h1 := 
utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h1", ips[0]) + h2 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h2", ips[1]) + h3 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h3", ips[2]) + h4 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h4", ips[3]) generateFullMeshConnectivity(ctx, ips[0], h2, h3, h4) waitForHostValidationStatus(clusterID, *infraEnvID, *h1.ID, "failure", models.HostValidationIDBelongsToMajorityGroup) @@ -860,15 +861,15 @@ var _ = Describe("Metrics tests", func() { It("'time sync' failed", func() { // create a validation success - h := ®isterHost(*infraEnvID).Host - generateGetNextStepsWithTimestamp(ctx, h, time.Now().Unix()) + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h, time.Now().Unix()) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDTimeSyncedBetweenHostAndService) oldChangedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDTimeSyncedBetweenHostAndService), hostValidationChangedMetric) oldFailedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDTimeSyncedBetweenHostAndService), hostValidationFailedMetric) // create a validation failure - generateGetNextStepsWithTimestamp(ctx, h, time.Now().Add(-2*time.Hour).Unix()) + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h, time.Now().Add(-2*time.Hour).Unix()) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDTimeSyncedBetweenHostAndService) // check generated metrics @@ -880,12 +881,12 @@ var _ = Describe("Metrics tests", func() { It("'time sync' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host - generateGetNextStepsWithTimestamp(ctx, h, time.Now().Add(65*time.Minute).Unix()) + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + 
utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h, time.Now().Add(65*time.Minute).Unix()) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDTimeSyncedBetweenHostAndService) // create a validation success - generateGetNextStepsWithTimestamp(ctx, h, time.Now().Unix()) + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h, time.Now().Unix()) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDTimeSyncedBetweenHostAndService) // check generated events @@ -895,15 +896,15 @@ var _ = Describe("Metrics tests", func() { It("'ntp-synced' failed", func() { // create a validation success - h := ®isterHost(*infraEnvID).Host - generateNTPPostStepReply(ctx, h, []*models.NtpSource{common.TestNTPSourceSynced}) + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateNTPPostStepReply(ctx, h, []*models.NtpSource{common.TestNTPSourceSynced}) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDNtpSynced) oldChangedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDNtpSynced), hostValidationChangedMetric) oldFailedMetricCounter := getValidationMetricCounter(string(models.HostValidationIDNtpSynced), hostValidationFailedMetric) // create a validation failure - generateNTPPostStepReply(ctx, h, nil) + utils_test.TestContext.GenerateNTPPostStepReply(ctx, h, nil) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "failure", models.HostValidationIDNtpSynced) // check generated events @@ -918,12 +919,12 @@ var _ = Describe("Metrics tests", func() { It("'ntp-synced' got fixed", func() { // create a validation failure - h := ®isterHost(*infraEnvID).Host - generateNTPPostStepReply(ctx, h, nil) + h := &utils_test.TestContext.RegisterHost(*infraEnvID).Host + utils_test.TestContext.GenerateNTPPostStepReply(ctx, h, nil) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, 
"failure", models.HostValidationIDNtpSynced) // create a validation success - generateNTPPostStepReply(ctx, h, []*models.NtpSource{common.TestNTPSourceSynced}) + utils_test.TestContext.GenerateNTPPostStepReply(ctx, h, []*models.NtpSource{common.TestNTPSourceSynced}) waitForHostValidationStatus(clusterID, *infraEnvID, *h.ID, "success", models.HostValidationIDNtpSynced) // check generated events @@ -934,7 +935,7 @@ var _ = Describe("Metrics tests", func() { Context("Cluster validation metrics", func() { removeHost := func(host *models.Host) { - _, err := userBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2DeregisterHost(ctx, &installer.V2DeregisterHostParams{ InfraEnvID: host.InfraEnvID, HostID: *host.ID, }) @@ -944,10 +945,10 @@ var _ = Describe("Metrics tests", func() { It("'all-hosts-are-ready-to-install' failed", func() { // create a validation success - hosts, _ := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) - c := getCluster(clusterID) + hosts, _ := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + c := utils_test.TestContext.GetCluster(clusterID) for _, h := range c.Hosts { - generateDomainResolution(ctx, h, "test-cluster", "example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "example.com") } waitForClusterValidationStatus(clusterID, "success", models.ClusterValidationIDAllHostsAreReadyToInstall) @@ -955,8 +956,8 @@ var _ = Describe("Metrics tests", func() { oldFailedMetricCounter := getValidationMetricCounter(string(models.ClusterValidationIDAllHostsAreReadyToInstall), clusterValidationFailedMetric) // create a validation failure by causing the a host to not be ready - generateHWPostStepReply(ctx, hosts[0], validHwInfo, "localhost") - waitForHostStateV2(ctx, models.HostStatusInsufficient, defaultWaitForHostStateTimeout, hosts[0]) + utils_test.TestContext.GenerateHWPostStepReply(ctx, 
hosts[0], validHwInfo, "localhost") + waitForHostStateV2(ctx, models.HostStatusInsufficient, utils_test.DefaultWaitForHostStateTimeout, hosts[0]) waitForClusterValidationStatus(clusterID, "failure", models.ClusterValidationIDAllHostsAreReadyToInstall) @@ -972,18 +973,18 @@ var _ = Describe("Metrics tests", func() { It("'all-hosts-are-ready-to-install' got fixed", func() { // create a validation failure - hosts, ips := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) - c := getCluster(clusterID) + hosts, ips := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + c := utils_test.TestContext.GetCluster(clusterID) for _, h := range c.Hosts { - generateDomainResolution(ctx, h, "test-cluster", "example.com") + utils_test.TestContext.GenerateDomainResolution(ctx, h, "test-cluster", "example.com") } removeHost(hosts[0]) waitForClusterValidationStatus(clusterID, "failure", models.ClusterValidationIDSufficientMastersCount) // create a validation success - h1 := registerNode(ctx, *infraEnvID, "h1-new", ips[0]) - v2UpdateVipParams(ctx, clusterID) + h1 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h1-new", ips[0]) + utils_test.TestContext.V2UpdateVipParams(ctx, clusterID) generateFullMeshConnectivity(ctx, ips[0], h1, hosts[1], hosts[2]) waitForClusterValidationStatus(clusterID, "success", models.ClusterValidationIDAllHostsAreReadyToInstall) @@ -994,7 +995,7 @@ var _ = Describe("Metrics tests", func() { It("'sufficient-masters-count' failed", func() { // create a validation success - hosts, _ := register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + hosts, _ := utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) waitForClusterValidationStatus(clusterID, "success", models.ClusterValidationIDSufficientMastersCount) oldChangedMetricCounter := getValidationMetricCounter(string(models.ClusterValidationIDSufficientMastersCount), clusterValidationChangedMetric) @@ -1020,7 +1021,7 @@ var _ = 
Describe("Metrics tests", func() { waitForClusterValidationStatus(clusterID, "failure", models.ClusterValidationIDSufficientMastersCount) // create a validation success - register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) + utils_test.TestContext.Register3nodes(ctx, clusterID, *infraEnvID, defaultCIDRv4) waitForClusterValidationStatus(clusterID, "success", models.ClusterValidationIDSufficientMastersCount) // check generated events @@ -1031,15 +1032,15 @@ var _ = Describe("Metrics tests", func() { ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) // create a validation success - h1 := registerNode(ctx, *infraEnvID, "h1", ips[0]) - registerNode(ctx, *infraEnvID, "h2", ips[1]) + h1 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h1", ips[0]) + utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h2", ips[1]) waitForClusterValidationStatus(clusterID, "success", models.ClusterValidationIDNtpServerConfigured) oldChangedMetricCounter := getValidationMetricCounter(string(models.ClusterValidationIDNtpServerConfigured), clusterValidationChangedMetric) oldFailedMetricCounter := getValidationMetricCounter(string(models.ClusterValidationIDNtpServerConfigured), clusterValidationFailedMetric) // create a validation failure - generateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()+(common.MaximumAllowedTimeDiffMinutes+1)*60) + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()+(common.MaximumAllowedTimeDiffMinutes+1)*60) Expect(db.Model(h1).Update("status", "known").Error).NotTo(HaveOccurred()) waitForClusterValidationStatus(clusterID, "failure", models.ClusterValidationIDNtpServerConfigured) @@ -1055,15 +1056,15 @@ var _ = Describe("Metrics tests", func() { It("'ntp-server-configured' got fixed", func() { ips := hostutil.GenerateIPv4Addresses(2, defaultCIDRv4) // create a validation failure - h1 := registerNode(ctx, *infraEnvID, "h1", ips[0]) - registerNode(ctx, *infraEnvID, "h2", ips[1]) - 
generateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()+(common.MaximumAllowedTimeDiffMinutes+1)*60) + h1 := utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h1", ips[0]) + utils_test.TestContext.RegisterNode(ctx, *infraEnvID, "h2", ips[1]) + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()+(common.MaximumAllowedTimeDiffMinutes+1)*60) Expect(db.Model(h1).Update("status", "known").Error).NotTo(HaveOccurred()) waitForClusterValidationStatus(clusterID, "failure", models.ClusterValidationIDNtpServerConfigured) // create a validation success - generateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()) - generateHWPostStepReply(ctx, h1, validHwInfo, "h1") + utils_test.TestContext.GenerateGetNextStepsWithTimestamp(ctx, h1, time.Now().Unix()) + utils_test.TestContext.GenerateHWPostStepReply(ctx, h1, validHwInfo, "h1") waitForClusterValidationStatus(clusterID, "success", models.ClusterValidationIDNtpServerConfigured) // check generated events diff --git a/subsystem/operators_test.go b/subsystem/operators_test.go index 77da1637da6..fea6c242031 100644 --- a/subsystem/operators_test.go +++ b/subsystem/operators_test.go @@ -32,6 +32,7 @@ import ( "github.com/openshift/assisted-service/internal/operators/serverless" "github.com/openshift/assisted-service/internal/operators/servicemesh" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("Operators endpoint tests", func() { @@ -42,7 +43,7 @@ var _ = Describe("Operators endpoint tests", func() { Context("supported-operators", func() { It("should return all supported operators", func() { - reply, err := userBMClient.Operators.V2ListSupportedOperators(context.TODO(), opclient.NewV2ListSupportedOperatorsParams()) + reply, err := utils_test.TestContext.UserBMClient.Operators.V2ListSupportedOperators(context.TODO(), opclient.NewV2ListSupportedOperatorsParams()) Expect(err).ToNot(HaveOccurred()) 
Expect(reply.GetPayload()).To(ConsistOf( @@ -65,7 +66,7 @@ var _ = Describe("Operators endpoint tests", func() { It("should provide operator properties", func() { params := opclient.NewV2ListOperatorPropertiesParams().WithOperatorName(odf.Operator.Name) - reply, err := userBMClient.Operators.V2ListOperatorProperties(context.TODO(), params) + reply, err := utils_test.TestContext.UserBMClient.Operators.V2ListOperatorProperties(context.TODO(), params) Expect(err).ToNot(HaveOccurred()) Expect(reply.Payload).To(BeEquivalentTo(models.OperatorProperties{})) @@ -74,7 +75,7 @@ var _ = Describe("Operators endpoint tests", func() { Context("Create cluster", func() { It("Have builtins", func() { - reply, err := userBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -94,7 +95,7 @@ var _ = Describe("Operators endpoint tests", func() { It("New OLM", func() { newOperator := odf.Operator.Name - reply, err := userBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ + reply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -109,7 +110,7 @@ var _ = Describe("Operators endpoint tests", func() { Expect(err).NotTo(HaveOccurred()) cluster := reply.GetPayload() - getClusterReply, err := userBMClient.Installer.V2GetCluster(context.TODO(), installer.NewV2GetClusterParams().WithClusterID(*cluster.ID)) + getClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(context.TODO(), 
installer.NewV2GetClusterParams().WithClusterID(*cluster.ID)) Expect(err).NotTo(HaveOccurred()) cluster = getClusterReply.GetPayload() @@ -124,7 +125,7 @@ var _ = Describe("Operators endpoint tests", func() { BeforeEach(func() { clusterCIDR := "10.128.0.0/14" serviceCIDR := "172.30.0.0/16" - registerClusterReply, err := userBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ + registerClusterReply, err := utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(context.TODO(), &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, @@ -132,7 +133,7 @@ var _ = Describe("Operators endpoint tests", func() { Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), PullSecret: swag.String(pullSecret), - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: swag.Bool(true), NetworkType: swag.String(models.ClusterCreateParamsNetworkTypeOpenShiftSDN), }, @@ -145,7 +146,7 @@ var _ = Describe("Operators endpoint tests", func() { It("Update OLMs", func() { By("First time - operators is empty", func() { - _, err := userBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: lso.Operator.Name}, @@ -155,17 +156,17 @@ var _ = Describe("Operators endpoint tests", func() { ClusterID: clusterID, }) Expect(err).ToNot(HaveOccurred()) - getReply, err2 := userBMClient.Installer.V2GetCluster(context.TODO(), installer.NewV2GetClusterParams().WithClusterID(clusterID)) + getReply, err2 := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(context.TODO(), 
installer.NewV2GetClusterParams().WithClusterID(clusterID)) Expect(err2).ToNot(HaveOccurred()) c := &common.Cluster{Cluster: *getReply.Payload} Expect(operatorscommon.HasOperator(c.MonitoredOperators, lso.Operator.Name)).Should(BeTrue()) Expect(operatorscommon.HasOperator(c.MonitoredOperators, odf.Operator.Name)).Should(BeTrue()) - verifyUsageSet(c.FeatureUsage, models.Usage{Name: strings.ToUpper(lso.Operator.Name)}, models.Usage{Name: strings.ToUpper(odf.Operator.Name)}) + utils_test.VerifyUsageSet(c.FeatureUsage, models.Usage{Name: strings.ToUpper(lso.Operator.Name)}, models.Usage{Name: strings.ToUpper(odf.Operator.Name)}) }) By("Second time - operators is not empty", func() { - _, err := userBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: lso.Operator.Name}, @@ -174,13 +175,13 @@ var _ = Describe("Operators endpoint tests", func() { ClusterID: clusterID, }) Expect(err).ToNot(HaveOccurred()) - getReply, err := userBMClient.Installer.V2GetCluster(context.TODO(), installer.NewV2GetClusterParams().WithClusterID(clusterID)) + getReply, err := utils_test.TestContext.UserBMClient.Installer.V2GetCluster(context.TODO(), installer.NewV2GetClusterParams().WithClusterID(clusterID)) Expect(err).ToNot(HaveOccurred()) c := &common.Cluster{Cluster: *getReply.Payload} Expect(operatorscommon.HasOperator(c.MonitoredOperators, lso.Operator.Name)).Should(BeTrue()) Expect(operatorscommon.HasOperator(c.MonitoredOperators, odf.Operator.Name)).Should(BeFalse()) - verifyUsageSet(c.FeatureUsage, models.Usage{Name: strings.ToUpper(lso.Operator.Name)}) + utils_test.VerifyUsageSet(c.FeatureUsage, models.Usage{Name: strings.ToUpper(lso.Operator.Name)}) }) }) @@ -189,24 +190,24 @@ var _ = Describe("Operators endpoint tests", func() 
{ hInventory := models.Inventory{} _ = json.Unmarshal([]byte(h.Inventory), &hInventory) hInventory.CPU = &models.CPU{Count: cpucores} - generateEssentialHostStepsWithInventory(context.TODO(), h, h.RequestedHostname, &hInventory) + utils_test.TestContext.GenerateEssentialHostStepsWithInventory(context.TODO(), h, h.RequestedHostname, &hInventory) } By("add hosts with a minimal worker (cnv operator is not enabled)") infraEnvID := registerInfraEnvSpecificVersion(&clusterID, models.ImageTypeMinimalIso, cluster.OpenshiftVersion).ID hosts := registerHostsAndSetRolesDHCP(clusterID, *infraEnvID, 6, "test-cluster", "example.com") - worker := getHostV2(*infraEnvID, *hosts[5].ID) + worker := utils_test.TestContext.GetHostV2(*infraEnvID, *hosts[5].ID) updateCpuCores(worker, 2) for _, h := range hosts { By(fmt.Sprintf("waiting for host %s to be ready", h.RequestedHostname)) - waitForHostState(context.TODO(), models.HostStatusKnown, defaultWaitForHostStateTimeout, h) + waitForHostState(context.TODO(), models.HostStatusKnown, utils_test.DefaultWaitForHostStateTimeout, h) } By("waiting for the cluster to be ready") - waitForClusterState(context.TODO(), clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + waitForClusterState(context.TODO(), clusterID, models.ClusterStatusReady, utils_test.DefaultWaitForClusterStateTimeout, IgnoreStateInfo) By("enable CNV operator") - _, err := userBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: cnv.Operator.Name}, @@ -217,7 +218,7 @@ var _ = Describe("Operators endpoint tests", func() { Expect(err).ToNot(HaveOccurred()) By("check that the cluster move to insufficient immediately") - c := getCluster(clusterID) + c := utils_test.TestContext.GetCluster(clusterID) 
Expect(*c.Status).To(Equal(models.ClusterStatusInsufficient)) }) }) @@ -237,7 +238,7 @@ var _ = Describe("Operators endpoint tests", func() { } } - cluster, err = user2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.User2BMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -248,7 +249,7 @@ var _ = Describe("Operators endpoint tests", func() { BaseDNSDomain: "example.com", ClusterNetworks: []*models.ClusterNetwork{{Cidr: models.Subnet(clusterCIDR), HostPrefix: 23}}, ServiceNetworks: []*models.ServiceNetwork{{Cidr: models.Subnet(serviceCIDR)}}, - SSHPublicKey: sshPublicKey, + SSHPublicKey: utils_test.SshPublicKey, VipDhcpAllocation: vipDhcpAllocation, NetworkType: swag.String(models.ClusterNetworkTypeOVNKubernetes), }, @@ -280,20 +281,20 @@ var _ = Describe("Operators endpoint tests", func() { OpenshiftVersion: "4.13.0", ClusterID: cluster.Payload.ID, PullSecret: swag.String(fmt.Sprintf(psTemplate, FakePS2)), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), CPUArchitecture: models.ClusterCPUArchitectureS390x, }, } - infraEnv, err := user2BMClient.Installer.RegisterInfraEnv(ctx, &infraEnvParams) + infraEnv, err := utils_test.TestContext.User2BMClient.Installer.RegisterInfraEnv(ctx, &infraEnvParams) Expect(err).NotTo(HaveOccurred()) Expect(infraEnv.Payload.CPUArchitecture).To(Equal(models.ClusterCPUArchitectureS390x)) - ops, err := agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) + ops, err := utils_test.TestContext.Agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) Expect(err).ToNot(HaveOccurred()) 
Expect(len(ops.GetPayload())).To(BeNumerically("==", 1)) // Update cluster with ODF operator - _, err = user2BMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.User2BMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: odf.Operator.Name}, @@ -303,7 +304,7 @@ var _ = Describe("Operators endpoint tests", func() { }) Expect(err).ShouldNot(HaveOccurred()) - ops, err = agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) + ops, err = utils_test.TestContext.Agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) Expect(err).ToNot(HaveOccurred()) Expect(len(ops.GetPayload())).To(BeNumerically(">=", 3)) @@ -324,7 +325,7 @@ var _ = Describe("Operators endpoint tests", func() { )) // Verify that the cluster is updatable - _, err = user2BMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.User2BMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ Name: swag.String("new-cluster-name"), }, @@ -350,20 +351,20 @@ var _ = Describe("Operators endpoint tests", func() { OpenshiftVersion: "4.13.0", ClusterID: cluster.Payload.ID, PullSecret: swag.String(fmt.Sprintf(psTemplate, FakePS2)), - SSHAuthorizedKey: swag.String(sshPublicKey), + SSHAuthorizedKey: swag.String(utils_test.SshPublicKey), CPUArchitecture: models.ClusterCPUArchitectureArm64, }, } - infraEnv, err := user2BMClient.Installer.RegisterInfraEnv(ctx, &infraEnvParams) + infraEnv, err := utils_test.TestContext.User2BMClient.Installer.RegisterInfraEnv(ctx, &infraEnvParams) Expect(err).NotTo(HaveOccurred()) 
Expect(infraEnv.Payload.CPUArchitecture).To(Equal(models.ClusterCPUArchitectureArm64)) - ops, err := agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) + ops, err := utils_test.TestContext.Agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) Expect(err).ToNot(HaveOccurred()) Expect(len(ops.GetPayload())).To(BeNumerically("==", 1)) // Update cluster with ODF operator - _, err = user2BMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err = utils_test.TestContext.User2BMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: lso.Operator.Name}, @@ -383,7 +384,7 @@ var _ = Describe("Operators endpoint tests", func() { nil, nil, ) - ops, err := agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) + ops, err := utils_test.TestContext.Agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) Expect(err).ToNot(HaveOccurred()) Expect(len(ops.GetPayload())).To(BeNumerically(">=", 3)) @@ -413,7 +414,7 @@ var _ = Describe("Operators endpoint tests", func() { nil, nil, ) - ops, err := agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) + ops, err := utils_test.TestContext.Agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) Expect(err).ToNot(HaveOccurred()) @@ -436,7 +437,7 @@ var _ = Describe("Operators endpoint tests", func() { nil, nil, ) - ops, err := agent2BMClient.Operators.V2ListOfClusterOperators(ctx, 
opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) + ops, err := utils_test.TestContext.Agent2BMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) Expect(err).ToNot(HaveOccurred()) @@ -457,7 +458,7 @@ var _ = Describe("Operators endpoint tests", func() { ctx := context.Background() BeforeEach(func() { var err error - cluster, err = userBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + cluster, err = utils_test.TestContext.UserBMClient.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ Name: swag.String("test-cluster"), OpenshiftVersion: swag.String(openshiftVersion), @@ -476,7 +477,7 @@ var _ = Describe("Operators endpoint tests", func() { }) It("should be all returned", func() { - ops, err := agentBMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) + ops, err := utils_test.TestContext.AgentBMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams().WithClusterID(*cluster.Payload.ID)) Expect(err).ToNot(HaveOccurred()) Expect(len(ops.GetPayload())).To(BeNumerically(">=", 3)) @@ -499,7 +500,7 @@ var _ = Describe("Operators endpoint tests", func() { }) It("should selected be returned", func() { - ops, err := agentBMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams(). + ops, err := utils_test.TestContext.AgentBMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams(). WithClusterID(*cluster.Payload.ID). 
WithOperatorName(&odf.Operator.Name)) @@ -509,9 +510,9 @@ var _ = Describe("Operators endpoint tests", func() { }) It("should be updated", func() { - v2ReportMonitoredOperatorStatus(ctx, agentBMClient, *cluster.Payload.ID, odf.Operator.Name, models.OperatorStatusFailed, "4.12") + utils_test.TestContext.V2ReportMonitoredOperatorStatus(ctx, *cluster.Payload.ID, odf.Operator.Name, models.OperatorStatusFailed, "4.12") - ops, err := agentBMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams(). + ops, err := utils_test.TestContext.AgentBMClient.Operators.V2ListOfClusterOperators(ctx, opclient.NewV2ListOfClusterOperatorsParams(). WithClusterID(*cluster.Payload.ID). WithOperatorName(&odf.Operator.Name)) @@ -526,7 +527,7 @@ var _ = Describe("Operators endpoint tests", func() { Context("Installation", func() { BeforeEach(func() { - cID, err := registerCluster(context.TODO(), userBMClient, "test-cluster", pullSecret) + cID, err := utils_test.TestContext.RegisterCluster(context.TODO(), utils_test.TestContext.UserBMClient, "test-cluster", pullSecret) Expect(err).ToNot(HaveOccurred()) clusterID = cID infraEnvID := registerInfraEnv(&clusterID, models.ImageTypeMinimalIso).ID @@ -535,7 +536,7 @@ var _ = Describe("Operators endpoint tests", func() { It("All OLM operators available", func() { By("Update OLM", func() { - _, err := userBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: lso.Operator.Name}, @@ -547,18 +548,18 @@ var _ = Describe("Operators endpoint tests", func() { }) By("Report operator available", func() { - v2ReportMonitoredOperatorStatus(context.TODO(), agentBMClient, clusterID, lso.Operator.Name, models.OperatorStatusAvailable, "") + 
utils_test.TestContext.V2ReportMonitoredOperatorStatus(context.TODO(), clusterID, lso.Operator.Name, models.OperatorStatusAvailable, "") }) By("Wait for cluster to be installed", func() { setClusterAsFinalizing(context.TODO(), clusterID) - completeInstallationAndVerify(context.TODO(), agentBMClient, clusterID, true) + completeInstallationAndVerify(context.TODO(), utils_test.TestContext.AgentBMClient, clusterID, true) }) }) It("Failed OLM Operator", func() { By("Update OLM", func() { - _, err := userBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ + _, err := utils_test.TestContext.UserBMClient.Installer.V2UpdateCluster(context.TODO(), &installer.V2UpdateClusterParams{ ClusterUpdateParams: &models.V2ClusterUpdateParams{ OlmOperators: []*models.OperatorCreateParams{ {Name: lso.Operator.Name}, @@ -570,14 +571,14 @@ var _ = Describe("Operators endpoint tests", func() { }) By("Report operator failed", func() { - v2ReportMonitoredOperatorStatus(context.TODO(), agentBMClient, clusterID, lso.Operator.Name, models.OperatorStatusFailed, "") + utils_test.TestContext.V2ReportMonitoredOperatorStatus(context.TODO(), clusterID, lso.Operator.Name, models.OperatorStatusFailed, "") }) By("Wait for cluster to be degraded", func() { setClusterAsFinalizing(context.TODO(), clusterID) - completeInstallation(agentBMClient, clusterID) + completeInstallation(utils_test.TestContext.AgentBMClient, clusterID) expectedStatusInfo := fmt.Sprintf("%s. 
Failed OLM operators: %s", cluster.StatusInfoDegraded, lso.Operator.Name) - waitForClusterState(context.TODO(), clusterID, models.ClusterStatusInstalled, defaultWaitForClusterStateTimeout, expectedStatusInfo) + waitForClusterState(context.TODO(), clusterID, models.ClusterStatusInstalled, utils_test.DefaultWaitForClusterStateTimeout, expectedStatusInfo) }) }) }) diff --git a/subsystem/spec_test.go b/subsystem/spec_test.go index 343a88faea5..0fdb0ae2dde 100644 --- a/subsystem/spec_test.go +++ b/subsystem/spec_test.go @@ -9,6 +9,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/openshift/assisted-service/client" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("test spec endpoint", func() { @@ -19,6 +20,6 @@ var _ = Describe("test spec endpoint", func() { data, err := io.ReadAll(reply.Body) Expect(err).To(BeNil()) reply.Body.Close() - Expect(isJSON(data)).To(BeTrue(), fmt.Sprintf("got %s", string(data))) + Expect(utils_test.IsJSON(data)).To(BeTrue(), fmt.Sprintf("got %s", string(data))) }) }) diff --git a/subsystem/subsystem_suite_test.go b/subsystem/subsystem_suite_test.go index 8ffa3f228e6..9eea6a5cbd0 100644 --- a/subsystem/subsystem_suite_test.go +++ b/subsystem/subsystem_suite_test.go @@ -27,9 +27,9 @@ import ( "k8s.io/client-go/kubernetes/scheme" k8sclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + "github.com/openshift/assisted-service/subsystem/utils_test" ) -var db *gorm.DB var log *logrus.Logger var wiremock *WireMock var kubeClient k8sclient.Client @@ -41,16 +41,6 @@ var VipAutoAllocOpenshiftVersion string = "4.14.0" var SDNNetworkTypeOpenshiftVersion string = "4.14.0" var pullSecret = "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"}}}" // #nosec -var ( - agentBMClient *client.AssistedInstall - agent2BMClient *client.AssistedInstall - badAgentBMClient *client.AssistedInstall - userBMClient 
*client.AssistedInstall - user2BMClient *client.AssistedInstall - readOnlyAdminUserBMClient *client.AssistedInstall - unallowedUserBMClient *client.AssistedInstall - editclusterUserBMClient *client.AssistedInstall -) const ( pollDefaultInterval = 1 * time.Millisecond @@ -122,6 +112,18 @@ func init() { if err != nil { log.Fatal(err.Error()) } + + + db, err := gorm.Open(postgres.Open(fmt.Sprintf("host=%s port=%s user=admin database=installer password=admin sslmode=disable", + Options.DBHost, Options.DBPort)), &gorm.Config{}) + if err != nil { + logrus.Fatal("Fail to connect to DB, ", err) + } + + if Options.EnableKubeAPI { + setupKubeClient() + } + userClientCfg := clientcfg(auth.UserAuthHeaderWriter("bearer " + Options.TestToken)) userClientCfg2 := clientcfg(auth.UserAuthHeaderWriter("bearer " + Options.TestToken2)) adminUserClientCfg := clientcfg(auth.UserAuthHeaderWriter("bearer " + Options.TestTokenAdmin)) @@ -130,24 +132,22 @@ func init() { agentClientCfg := clientcfg(auth.AgentAuthHeaderWriter(FakePS)) agent2ClientCfg := clientcfg(auth.AgentAuthHeaderWriter(FakePS2)) badAgentClientCfg := clientcfg(auth.AgentAuthHeaderWriter(WrongPullSecret)) - userBMClient = client.New(userClientCfg) - user2BMClient = client.New(userClientCfg2) - readOnlyAdminUserBMClient = client.New(adminUserClientCfg) - unallowedUserBMClient = client.New(unallowedUserClientCfg) - editclusterUserBMClient = client.New(editclusterClientCfg) - agentBMClient = client.New(agentClientCfg) - agent2BMClient = client.New(agent2ClientCfg) - badAgentBMClient = client.New(badAgentClientCfg) - - db, err = gorm.Open(postgres.Open(fmt.Sprintf("host=%s port=%s user=admin database=installer password=admin sslmode=disable", - Options.DBHost, Options.DBPort)), &gorm.Config{}) - if err != nil { - logrus.Fatal("Fail to connect to DB, ", err) - } - if Options.EnableKubeAPI { - setupKubeClient() - } + utils_test.TestContext = utils_test.NewSubsystemTestContext( + log, + db, + client.New(agentClientCfg), + 
client.New(userClientCfg), + client.New(agent2ClientCfg), + client.New(userClientCfg2), + client.New(adminUserClientCfg), + client.New(unallowedUserClientCfg), + client.New(editclusterClientCfg), + client.New(badAgentClientCfg), + pollDefaultInterval, + pollDefaultTimeout, + VipAutoAllocOpenshiftVersion, + ) if Options.AuthType == auth.TypeRHSSO { releaseSourcesString := os.Getenv("RELEASE_SOURCES") @@ -173,7 +173,7 @@ func init() { } // Use the default openshift version - if reply, err := userBMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), + if reply, err := utils_test.TestContext.UserBMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), &versions.V2ListSupportedOpenshiftVersionsParams{}); err == nil { for openshiftVersionString, openshiftVersionStruct := range reply.GetPayload() { if openshiftVersionStruct.Default { @@ -186,10 +186,15 @@ func init() { func TestSubsystem(t *testing.T) { AfterEach(func() { - subsystemAfterEach() + subsystemAfterEach(utils_test.TestContext) }) RegisterFailHandler(Fail) - subsystemAfterEach() // make sure we start tests from scratch + subsystemAfterEach(utils_test.TestContext) // make sure we start tests from scratch RunSpecs(t, "Subsystem Suite") } + +func subsystemAfterEach(testContext *utils_test.SubsystemTestContext) { + testContext.DeregisterResources() + testContext.ClearDB() +} diff --git a/subsystem/utils_test.go b/subsystem/utils_test.go deleted file mode 100644 index 5ed258ca8ca..00000000000 --- a/subsystem/utils_test.go +++ /dev/null @@ -1,753 +0,0 @@ -package subsystem - -import ( - "context" - "encoding/json" - "fmt" - "net" - "time" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/google/uuid" - "github.com/hashicorp/go-multierror" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/openshift/assisted-service/client" - "github.com/openshift/assisted-service/client/installer" - operatorsClient "github.com/openshift/assisted-service/client/operators" - "github.com/openshift/assisted-service/internal/common" - "github.com/openshift/assisted-service/internal/constants" - "github.com/openshift/assisted-service/internal/host/hostutil" - usageMgr "github.com/openshift/assisted-service/internal/usage" - "github.com/openshift/assisted-service/models" - "gorm.io/gorm" - "k8s.io/apimachinery/pkg/util/wait" -) - -// #nosec -const ( - sshPublicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC50TuHS7aYci+U+5PLe/aW/I6maBi9PBDucLje6C6gtArfjy7udWA1DCSIQd+DkHhi57/s+PmvEjzfAfzqo+L+/8/O2l2seR1pPhHDxMR/rSyo/6rZP6KIL8HwFqXHHpDUM4tLXdgwKAe1LxBevLt/yNl8kOiHJESUSl+2QSf8z4SIbo/frDD8OwOvtfKBEG4WCb8zEsEuIPNF/Vo/UxPtS9pPTecEsWKDHR67yFjjamoyLvAzMAJotYgyMoxm8PTyCgEzHk3s3S4iO956d6KVOEJVXnTVhAxrtLuubjskd7N4hVN7h2s4Z584wYLKYhrIBL0EViihOMzY4mH3YE4KZusfIx6oMcggKX9b3NHm0la7cj2zg0r6zjUn6ZCP4gXM99e5q4auc0OEfoSfQwofGi3WmxkG3tEozCB8Zz0wGbi2CzR8zlcF+BNV5I2LESlLzjPY5B4dvv5zjxsYoz94p3rUhKnnPM2zTx1kkilDK5C5fC1k9l/I/r5Qk4ebLQU= oscohen@localhost.localdomain" - pullSecretName = "pull-secret" - defaultWaitForHostStateTimeout = 20 * time.Second - defaultWaitForClusterStateTimeout = 40 * time.Second - defaultWaitForMachineNetworkCIDRTimeout = 40 * time.Second -) - -func subsystemAfterEach() { - if Options.EnableKubeAPI { - printCRs(context.Background(), kubeClient) - cleanUpCRs(context.Background(), kubeClient) - verifyCleanUP(context.Background(), kubeClient) - } else { - deregisterResources() - } - clearDB() -} - -func deregisterResources() { - var multiErr *multierror.Error - - reply, err := userBMClient.Installer.V2ListClusters(context.Background(), &installer.V2ListClustersParams{}) - if err != nil { - log.WithError(err).Error("Failed to list clusters") - return - } - - if GinkgoT().Failed() { - // Dump cluster info on failure - multiErr = 
multierror.Append(multiErr, GinkgoResourceLogger(models.ClusterKindCluster, reply.Payload)) - } - - infraEnvReply, err := userBMClient.Installer.ListInfraEnvs(context.Background(), &installer.ListInfraEnvsParams{}) - if err != nil { - log.WithError(err).Error("Failed to list infra-envs") - } - - if GinkgoT().Failed() { - // Dump infar-env info on failure - multiErr = multierror.Append(multiErr, GinkgoResourceLogger(models.InfraEnvKindInfraEnv, infraEnvReply.Payload)) - } - - for _, i := range infraEnvReply.GetPayload() { - if GinkgoT().Failed() { - hostReply, err1 := userBMClient.Installer.V2ListHosts(context.Background(), &installer.V2ListHostsParams{InfraEnvID: *i.ID}) - if err1 != nil { - log.WithError(err).Errorf("Failed to list infra-env %s (%s) hosts", i.ID, *i.Name) - } - // Dump host info on failure - multiErr = multierror.Append(multiErr, GinkgoResourceLogger(models.HostKindHost, hostReply.Payload)) - } - if _, err = userBMClient.Installer.DeregisterInfraEnv(context.Background(), &installer.DeregisterInfraEnvParams{InfraEnvID: *i.ID}); err != nil { - log.WithError(err).Debugf("InfraEnv %s couldn't be deleted via REST API", i.ID) - } - } - - for _, c := range reply.GetPayload() { - if _, err = userBMClient.Installer.V2DeregisterCluster(context.Background(), &installer.V2DeregisterClusterParams{ClusterID: *c.ID}); err != nil { - log.WithError(err).Debugf("Cluster %s couldn't be deleted via REST API", *c.ID) - } - } - - if multiErr.ErrorOrNil() != nil { - log.WithError(err).Error("At-least one error occured during deregister cleanup") - } -} - -func clearDB() { - // Clean the DB to make sure we start tests from scratch - for _, model := range []interface{}{ - &models.Host{}, - &models.Cluster{}, - &models.InfraEnv{}, - &models.Event{}, - &models.MonitoredOperator{}, - &models.ClusterNetwork{}, - &models.ServiceNetwork{}, - &models.MachineNetwork{}, - } { - db.Session(&gorm.Session{AllowGlobalUpdate: true}).Unscoped().Delete(model) - } -} - -func 
GinkgoResourceLogger(kind string, resources interface{}) error { - resList, err := json.MarshalIndent(resources, "", " ") - if err != nil { - return err - } - GinkgoLogger(fmt.Sprintf("The failed test '%s' created the following %s resources:", GinkgoT().Name(), kind)) - GinkgoLogger(string(resList)) - return nil -} - -func GinkgoLogger(s string) { - _, _ = GinkgoWriter.Write([]byte(fmt.Sprintln(s))) -} - -func strToUUID(s string) *strfmt.UUID { - u := strfmt.UUID(s) - return &u -} - -func registerHost(infraEnvID strfmt.UUID) *models.HostRegistrationResponse { - uuid := strToUUID(uuid.New().String()) - return registerHostByUUID(infraEnvID, *uuid) -} - -func registerHostByUUID(infraEnvID, hostID strfmt.UUID) *models.HostRegistrationResponse { - host, err := agentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ - InfraEnvID: infraEnvID, - NewHostParams: &models.HostCreateParams{ - HostID: &hostID, - }, - }) - Expect(err).NotTo(HaveOccurred()) - return host.GetPayload() -} - -func bindHost(infraEnvID, hostID, clusterID strfmt.UUID) *models.Host { - host, err := userBMClient.Installer.BindHost(context.Background(), &installer.BindHostParams{ - HostID: hostID, - InfraEnvID: infraEnvID, - BindHostParams: &models.BindHostParams{ - ClusterID: &clusterID, - }, - }) - Expect(err).NotTo(HaveOccurred()) - return host.GetPayload() -} - -func unbindHost(infraEnvID, hostID strfmt.UUID) *models.Host { - host, err := userBMClient.Installer.UnbindHost(context.Background(), &installer.UnbindHostParams{ - HostID: hostID, - InfraEnvID: infraEnvID, - }) - Expect(err).NotTo(HaveOccurred()) - return host.GetPayload() -} - -func getHostV2(infraEnvID, hostID strfmt.UUID) *models.Host { - host, err := userBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ - InfraEnvID: infraEnvID, - HostID: hostID, - }) - Expect(err).NotTo(HaveOccurred()) - return host.GetPayload() -} - -func registerCluster(ctx context.Context, client 
*client.AssistedInstall, clusterName string, pullSecret string) (strfmt.UUID, error) { - var cluster, err = client.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ - NewClusterParams: &models.ClusterCreateParams{ - Name: swag.String(clusterName), - OpenshiftVersion: swag.String(VipAutoAllocOpenshiftVersion), - PullSecret: swag.String(pullSecret), - BaseDNSDomain: "example.com", - VipDhcpAllocation: swag.Bool(true), - }, - }) - if err != nil { - return "", err - } - return *cluster.GetPayload().ID, nil -} - -func getCluster(clusterID strfmt.UUID) *models.Cluster { - cluster, err := userBMClient.Installer.V2GetCluster(context.Background(), &installer.V2GetClusterParams{ - ClusterID: clusterID, - }) - Expect(err).NotTo(HaveOccurred()) - return cluster.GetPayload() -} - -func getCommonCluster(ctx context.Context, clusterID strfmt.UUID) *common.Cluster { - var cluster common.Cluster - err := db.First(&cluster, "id = ?", clusterID).Error - Expect(err).ShouldNot(HaveOccurred()) - return &cluster -} - -func areStepsInList(steps models.Steps, stepTypes []models.StepType) { - for _, stepType := range stepTypes { - Expect(isStepTypeInList(steps, stepType)).Should(BeTrue()) - } -} - -func isStepTypeInList(steps models.Steps, sType models.StepType) bool { - for _, step := range steps.Instructions { - if step.StepType == sType { - return true - } - } - return false -} - -func getStepFromListByStepType(steps models.Steps, sType models.StepType) *models.Step { - for _, step := range steps.Instructions { - if step.StepType == sType { - return step - } - } - return nil -} - -func getNextSteps(infraEnvID, hostID strfmt.UUID) models.Steps { - steps, err := agentBMClient.Installer.V2GetNextSteps(context.Background(), &installer.V2GetNextStepsParams{ - InfraEnvID: infraEnvID, - HostID: hostID, - DiscoveryAgentVersion: swag.String("quay.io/edge-infrastructure/assisted-installer-agent:latest"), - }) - Expect(err).NotTo(HaveOccurred()) - return *steps.GetPayload() -} - 
-func updateHostLogProgress(infraEnvID strfmt.UUID, hostID strfmt.UUID, progress models.LogsState) { - ctx := context.Background() - - updateReply, err := agentBMClient.Installer.V2UpdateHostLogsProgress(ctx, &installer.V2UpdateHostLogsProgressParams{ - InfraEnvID: infraEnvID, - HostID: hostID, - LogsProgressParams: &models.LogsProgressParams{ - LogsState: common.LogStatePtr(progress), - }, - }) - Expect(err).ShouldNot(HaveOccurred()) - Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewV2UpdateHostLogsProgressNoContent())) -} - -func updateClusterLogProgress(clusterID strfmt.UUID, progress models.LogsState) { - ctx := context.Background() - - updateReply, err := agentBMClient.Installer.V2UpdateClusterLogsProgress(ctx, &installer.V2UpdateClusterLogsProgressParams{ - ClusterID: clusterID, - LogsProgressParams: &models.LogsProgressParams{ - LogsState: common.LogStatePtr(progress), - }, - }) - Expect(err).ShouldNot(HaveOccurred()) - Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewV2UpdateClusterLogsProgressNoContent())) -} - -func updateProgress(hostID strfmt.UUID, infraEnvID strfmt.UUID, current_step models.HostStage) { - updateHostProgressWithInfo(hostID, infraEnvID, current_step, "") -} - -func updateHostProgressWithInfo(hostID strfmt.UUID, infraEnvID strfmt.UUID, current_step models.HostStage, info string) { - ctx := context.Background() - - installProgress := &models.HostProgress{ - CurrentStage: current_step, - ProgressInfo: info, - } - updateReply, err := agentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ - InfraEnvID: infraEnvID, - HostProgress: installProgress, - HostID: hostID, - }) - Expect(err).ShouldNot(HaveOccurred()) - Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewV2UpdateHostInstallProgressOK())) -} - -func generateGetNextStepsWithTimestamp(ctx context.Context, h *models.Host, timestamp int64) { - _, err := agentBMClient.Installer.V2GetNextSteps(ctx, 
&installer.V2GetNextStepsParams{ - HostID: *h.ID, - InfraEnvID: h.InfraEnvID, - DiscoveryAgentVersion: swag.String("quay.io/edge-infrastructure/assisted-installer-agent:latest"), - Timestamp: ×tamp, - }) - Expect(err).ToNot(HaveOccurred()) -} - -func generateHWPostStepReply(ctx context.Context, h *models.Host, hwInfo *models.Inventory, hostname string) { - hwInfo.Hostname = hostname - hw, err := json.Marshal(&hwInfo) - Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: string(hw), - StepID: string(models.StepTypeInventory), - StepType: models.StepTypeInventory, - }, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func generateConnectivityCheckPostStepReply(ctx context.Context, h *models.Host, targetCIDR string, success bool) { - targetIP, _, err := net.ParseCIDR(targetCIDR) - Expect(err).NotTo(HaveOccurred()) - response := models.ConnectivityReport{ - RemoteHosts: []*models.ConnectivityRemoteHost{ - {L3Connectivity: []*models.L3Connectivity{{RemoteIPAddress: targetIP.String(), Successful: success}}}, - }, - } - bytes, err := json.Marshal(&response) - Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: string(bytes), - StepID: string(models.StepTypeConnectivityCheck), - StepType: models.StepTypeConnectivityCheck, - }, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func generateNTPPostStepReply(ctx context.Context, h *models.Host, ntpSources []*models.NtpSource) { - response := models.NtpSynchronizationResponse{ - NtpSources: ntpSources, - } - - bytes, err := json.Marshal(&response) - Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: 
h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: string(bytes), - StepID: string(models.StepTypeNtpSynchronizer), - StepType: models.StepTypeNtpSynchronizer, - }, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func generateApiVipPostStepReply(ctx context.Context, h *models.Host, cluster *models.Cluster, success bool) { - checkVipApiResponse := models.APIVipConnectivityResponse{ - IsSuccess: success, - } - if cluster != nil && swag.StringValue(cluster.Status) == models.ClusterStatusAddingHosts { - checkVipApiResponse.Ignition = `{ - "ignition": { - "config": {}, - "version": "3.2.0" - }, - "storage": { - "files": [] - } - }` - } - bytes, jsonErr := json.Marshal(checkVipApiResponse) - Expect(jsonErr).NotTo(HaveOccurred()) - _, err := agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - StepType: models.StepTypeAPIVipConnectivityCheck, - Output: string(bytes), - StepID: "apivip-connectivity-check-step", - }, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func generateVerifyVipsPostStepReply(ctx context.Context, h *models.Host, apiVips []string, ingressVips []string, verification models.VipVerification) { - response := models.VerifyVipsResponse{} - for _, vip := range apiVips { - response = append(response, &models.VerifiedVip{ - Verification: common.VipVerificationPtr(verification), - Vip: models.IP(vip), - VipType: models.VipTypeAPI, - }) - } - for _, vip := range ingressVips { - response = append(response, &models.VerifiedVip{ - Verification: common.VipVerificationPtr(verification), - Vip: models.IP(vip), - VipType: models.VipTypeIngress, - }) - } - bytes, jsonErr := json.Marshal(&response) - Expect(jsonErr).NotTo(HaveOccurred()) - _, err := agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - 
StepType: models.StepTypeVerifyVips, - Output: string(bytes), - StepID: string(models.StepTypeVerifyVips), - }, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func getTangResponse(url string) models.TangServerResponse { - return models.TangServerResponse{ - TangURL: url, - Payload: "some_fake_payload", - Signatures: []*models.TangServerSignatures{ - { - Signature: "some_fake_signature1", - Protected: "foobar1", - }, - { - Signature: "some_fake_signature2", - Protected: "foobar2", - }, - }, - } -} - -func generateTangPostStepReply(ctx context.Context, success bool, hosts ...*models.Host) { - response := models.TangConnectivityResponse{ - IsSuccess: false, - TangServerResponse: nil, - } - - if success { - tangResponse := getTangResponse("http://tang.example.com:7500") - response = models.TangConnectivityResponse{ - IsSuccess: true, - TangServerResponse: []*models.TangServerResponse{&tangResponse}, - } - } - - bytes, err := json.Marshal(&response) - Expect(err).NotTo(HaveOccurred()) - - for _, h := range hosts { - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: string(bytes), - StepID: string(models.StepTypeTangConnectivityCheck), - StepType: models.StepTypeTangConnectivityCheck, - }, - }) - Expect(err).ShouldNot(HaveOccurred()) - } -} - -func generateContainerImageAvailabilityPostStepReply(ctx context.Context, h *models.Host, imageStatuses []*models.ContainerImageAvailability) { - response := models.ContainerImageAvailabilityResponse{ - Images: imageStatuses, - } - - bytes, err := json.Marshal(&response) - Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: string(bytes), - StepID: string(models.StepTypeContainerImageAvailability), - StepType: 
models.StepTypeContainerImageAvailability, - }, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func getDefaultInventory(cidr string) *models.Inventory { - hwInfo := validHwInfo - hwInfo.Interfaces[0].IPV4Addresses = []string{cidr} - return hwInfo -} - -func getDefaultNutanixInventory(cidr string) *models.Inventory { - nutanixInventory := *getDefaultInventory(cidr) - nutanixInventory.SystemVendor = &models.SystemVendor{Manufacturer: "Nutanix", ProductName: "AHV", Virtual: true, SerialNumber: "3534"} - nutanixInventory.Disks = []*models.Disk{&vma, &vmremovable} - return &nutanixInventory -} - -func getDefaultExternalInventory(cidr string) *models.Inventory { - externalInventory := *getDefaultInventory(cidr) - externalInventory.SystemVendor = &models.SystemVendor{Manufacturer: "OracleCloud.com", ProductName: "OCI", Virtual: true, SerialNumber: "3534"} - externalInventory.Disks = []*models.Disk{&vma, &vmremovable} - return &externalInventory -} - -func getDefaultVmwareInventory(cidr string) *models.Inventory { - vmwareInventory := *getDefaultInventory(cidr) - vmwareInventory.SystemVendor = &models.SystemVendor{Manufacturer: "VMware, Inc.", ProductName: "VMware Virtual", Virtual: true, SerialNumber: "3534"} - vmwareInventory.Disks = []*models.Disk{&vma, &vmremovable} - return &vmwareInventory -} - -func generateEssentialHostSteps(ctx context.Context, h *models.Host, name, cidr string) { - generateEssentialHostStepsWithInventory(ctx, h, name, getDefaultInventory(cidr)) -} - -func generateEssentialHostStepsWithInventory(ctx context.Context, h *models.Host, name string, inventory *models.Inventory) { - generateGetNextStepsWithTimestamp(ctx, h, time.Now().Unix()) - generateHWPostStepReply(ctx, h, inventory, name) - generateFAPostStepReply(ctx, h, validFreeAddresses) - generateNTPPostStepReply(ctx, h, []*models.NtpSource{common.TestNTPSourceSynced}) - generateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) -} - -func 
generateDomainResolution(ctx context.Context, h *models.Host, name string, baseDomain string) { - reply := common.CreateWildcardDomainNameResolutionReply(name, baseDomain) - reply.Resolutions = append(reply.Resolutions, &models.DomainResolutionResponseDomain{ - DomainName: swag.String("quay.io"), - IPV4Addresses: []strfmt.IPv4{"7.8.9.11/24"}, - IPV6Addresses: []strfmt.IPv6{"1003:db8::11/120"}, - }) - generateDomainNameResolutionReply(ctx, h, *reply) -} - -func generateCommonDomainReply(ctx context.Context, h *models.Host, clusterName, baseDomain string) { - fqdn := func(domainPrefix, clusterName, baseDomain string) *string { - return swag.String(fmt.Sprintf("%s.%s.%s", domainPrefix, clusterName, baseDomain)) - } - var domainResolutions = []*models.DomainResolutionResponseDomain{ - { - DomainName: fqdn(constants.APIClusterSubdomain, clusterName, baseDomain), - IPV4Addresses: []strfmt.IPv4{"1.2.3.4/24"}, - IPV6Addresses: []strfmt.IPv6{"1001:db8::10/120"}, - }, - { - DomainName: fqdn(constants.InternalAPIClusterSubdomain, clusterName, baseDomain), - IPV4Addresses: []strfmt.IPv4{"4.5.6.7/24"}, - IPV6Addresses: []strfmt.IPv6{"1002:db8::10/120"}, - }, - { - DomainName: fqdn(constants.AppsSubDomainNameHostDNSValidation+".apps", clusterName, baseDomain), - IPV4Addresses: []strfmt.IPv4{"7.8.9.10/24"}, - IPV6Addresses: []strfmt.IPv6{"1003:db8::10/120"}, - }, - { - DomainName: swag.String("quay.io"), - IPV4Addresses: []strfmt.IPv4{"7.8.9.11/24"}, - IPV6Addresses: []strfmt.IPv6{"1003:db8::11/120"}, - }, - { - DomainName: fqdn(constants.DNSWildcardFalseDomainName, clusterName, baseDomain), - IPV4Addresses: []strfmt.IPv4{}, - IPV6Addresses: []strfmt.IPv6{}, - }, - { - DomainName: fqdn(constants.DNSWildcardFalseDomainName, clusterName, baseDomain+"."), - IPV4Addresses: []strfmt.IPv4{}, - IPV6Addresses: []strfmt.IPv6{}, - }, - } - var domainResolutionResponse = models.DomainResolutionResponse{ - Resolutions: domainResolutions, - } - generateDomainNameResolutionReply(ctx, h, 
domainResolutionResponse) -} - -func generateEssentialPrepareForInstallationSteps(ctx context.Context, hosts ...*models.Host) { - generateSuccessfulDiskSpeedResponses(ctx, sdbId, hosts...) - for _, h := range hosts { - generateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{common.TestImageStatusesSuccess}) - } -} - -func registerNode(ctx context.Context, infraenvID strfmt.UUID, name, ip string) *models.Host { - h := ®isterHost(infraenvID).Host - generateEssentialHostSteps(ctx, h, name, ip) - generateEssentialPrepareForInstallationSteps(ctx, h) - return h -} - -func registerNodeWithInventory(ctx context.Context, infraEnvID strfmt.UUID, name, ip string, inventory *models.Inventory) *models.Host { - h := ®isterHost(infraEnvID).Host - hwInfo := inventory - hwInfo.Interfaces[0].IPV4Addresses = []string{ip} - generateEssentialHostStepsWithInventory(ctx, h, name, hwInfo) - generateEssentialPrepareForInstallationSteps(ctx, h) - return h -} - -func isJSON(s []byte) bool { - var js map[string]interface{} - return json.Unmarshal(s, &js) == nil - -} - -func generateFAPostStepReply(ctx context.Context, h *models.Host, freeAddresses models.FreeNetworksAddresses) { - fa, err := json.Marshal(&freeAddresses) - Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: string(fa), - StepID: string(models.StepTypeFreeNetworkAddresses), - StepType: models.StepTypeFreeNetworkAddresses, - }, - }) - Expect(err).To(BeNil()) -} - -func generateDiskSpeedChekResponse(ctx context.Context, h *models.Host, path string, exitCode int64) { - result := models.DiskSpeedCheckResponse{ - IoSyncDuration: 10, - Path: path, - } - b, err := json.Marshal(&result) - Expect(err).ToNot(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: 
h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: exitCode, - Output: string(b), - StepID: string(models.StepTypeInstallationDiskSpeedCheck), - StepType: models.StepTypeInstallationDiskSpeedCheck, - }, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func generateSuccessfulDiskSpeedResponses(ctx context.Context, path string, hosts ...*models.Host) { - for _, h := range hosts { - generateDiskSpeedChekResponse(ctx, h, path, 0) - } -} - -func generateFailedDiskSpeedResponses(ctx context.Context, path string, hosts ...*models.Host) { - for _, h := range hosts { - generateDiskSpeedChekResponse(ctx, h, path, -1) - } -} - -func generateDomainNameResolutionReply(ctx context.Context, h *models.Host, domainNameResolution models.DomainResolutionResponse) { - dnsResolotion, err := json.Marshal(&domainNameResolution) - Expect(err).NotTo(HaveOccurred()) - _, err = agentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ - InfraEnvID: h.InfraEnvID, - HostID: *h.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: string(dnsResolotion), - StepID: string(models.StepTypeDomainResolution), - StepType: models.StepTypeDomainResolution, - }, - }) - Expect(err).To(BeNil()) -} - -func updateVipParams(ctx context.Context, clusterID strfmt.UUID) { - apiVip := "1.2.3.5" - ingressVip := "1.2.3.6" - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ - ClusterUpdateParams: &models.V2ClusterUpdateParams{ - VipDhcpAllocation: swag.Bool(false), - APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, - }, - ClusterID: clusterID, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func v2UpdateVipParams(ctx context.Context, clusterID strfmt.UUID) { - apiVip := "1.2.3.5" - ingressVip := "1.2.3.6" - _, err := userBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ - ClusterUpdateParams: 
&models.V2ClusterUpdateParams{ - VipDhcpAllocation: swag.Bool(false), - APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, - }, - ClusterID: clusterID, - }) - Expect(err).ShouldNot(HaveOccurred()) -} - -func register3nodes(ctx context.Context, clusterID, infraenvID strfmt.UUID, cidr string) ([]*models.Host, []string) { - ips := hostutil.GenerateIPv4Addresses(3, cidr) - h1 := registerNode(ctx, infraenvID, "h1", ips[0]) - h2 := registerNode(ctx, infraenvID, "h2", ips[1]) - h3 := registerNode(ctx, infraenvID, "h3", ips[2]) - updateVipParams(ctx, clusterID) - generateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) - - return []*models.Host{h1, h2, h3}, ips -} - -func v2ReportMonitoredOperatorStatus(ctx context.Context, client *client.AssistedInstall, clusterID strfmt.UUID, opName string, opStatus models.OperatorStatus, opVersion string) { - _, err := client.Operators.V2ReportMonitoredOperatorStatus(ctx, &operatorsClient.V2ReportMonitoredOperatorStatusParams{ - ClusterID: clusterID, - ReportParams: &models.OperatorMonitorReport{ - Name: opName, - Status: opStatus, - StatusInfo: string(opStatus), - Version: opVersion, - }, - }) - Expect(err).NotTo(HaveOccurred()) -} - -func verifyUsageSet(featureUsage string, candidates ...models.Usage) { - usages := make(map[string]models.Usage) - err := json.Unmarshal([]byte(featureUsage), &usages) - Expect(err).NotTo(HaveOccurred()) - for _, usage := range candidates { - usage.ID = usageMgr.UsageNameToID(usage.Name) - Expect(usages[usage.Name]).To(Equal(usage)) - } -} - -func verifyUsageNotSet(featureUsage string, features ...string) { - usages := make(map[string]*models.Usage) - err := json.Unmarshal([]byte(featureUsage), &usages) - Expect(err).NotTo(HaveOccurred()) - for _, name := range features { - Expect(usages[name]).To(BeNil()) - } -} - -func waitForLastInstallationCompletionStatus(clusterID strfmt.UUID, status string) { 
- - waitFunc := func(ctx context.Context) (bool, error) { - c := getCommonCluster(ctx, clusterID) - return c.LastInstallationPreparation.Status == status, nil - } - err := wait.PollUntilContextTimeout(context.Background(), pollDefaultInterval, pollDefaultTimeout, false, waitFunc) - Expect(err).NotTo(HaveOccurred()) -} diff --git a/subsystem/utils_test/subsystem_test_context.go b/subsystem/utils_test/subsystem_test_context.go new file mode 100644 index 00000000000..527f66b47ac --- /dev/null +++ b/subsystem/utils_test/subsystem_test_context.go @@ -0,0 +1,777 @@ +package utils_test + +import ( + "context" + "encoding/json" + "fmt" + "net" + "time" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/google/uuid" + "github.com/hashicorp/go-multierror" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/openshift/assisted-service/client" + "github.com/openshift/assisted-service/client/installer" + operatorsClient "github.com/openshift/assisted-service/client/operators" + "github.com/openshift/assisted-service/internal/common" + "github.com/openshift/assisted-service/internal/constants" + "github.com/openshift/assisted-service/internal/host/hostutil" + "github.com/openshift/assisted-service/models" + "github.com/sirupsen/logrus" + "gorm.io/gorm" + "k8s.io/apimachinery/pkg/util/wait" +) + +type SubsystemTestContext struct { + log *logrus.Logger + db *gorm.DB + AgentBMClient *client.AssistedInstall + UserBMClient *client.AssistedInstall + Agent2BMClient *client.AssistedInstall + User2BMClient *client.AssistedInstall + ReadOnlyAdminUserBMClient *client.AssistedInstall + UnallowedUserBMClient *client.AssistedInstall + EditclusterUserBMClient *client.AssistedInstall + BadAgentBMClient *client.AssistedInstall + pollDefaultInterval time.Duration + pollDefaultTimeout time.Duration + vipAutoAllocOpenshiftVersion string +} + +func NewSubsystemTestContext( + log *logrus.Logger, + db *gorm.DB, + agentBMClient *client.AssistedInstall, + 
userBMClient *client.AssistedInstall, + agent2BMClient *client.AssistedInstall, + user2BMClient *client.AssistedInstall, + readOnlyAdminUserBMClient *client.AssistedInstall, + unallowedUserBMClient *client.AssistedInstall, + editclusterUserBMClient *client.AssistedInstall, + badAgentBMClient *client.AssistedInstall, + pollDefaultInterval time.Duration, + pollDefaultTimeout time.Duration, + vipAutoAllocOpenshiftVersion string, +) *SubsystemTestContext { + return &SubsystemTestContext{ + log: log, + db: db, + AgentBMClient: agentBMClient, + UserBMClient: userBMClient, + Agent2BMClient: agent2BMClient, + User2BMClient: user2BMClient, + ReadOnlyAdminUserBMClient: readOnlyAdminUserBMClient, + UnallowedUserBMClient: unallowedUserBMClient, + EditclusterUserBMClient: editclusterUserBMClient, + BadAgentBMClient: badAgentBMClient, + pollDefaultInterval: pollDefaultInterval, + pollDefaultTimeout: pollDefaultTimeout, + vipAutoAllocOpenshiftVersion: vipAutoAllocOpenshiftVersion, + } +} + +func (t *SubsystemTestContext) GetDB() *gorm.DB { + return t.db +} + +func (t *SubsystemTestContext) RegisterHost(infraEnvID strfmt.UUID) *models.HostRegistrationResponse { + uuid := StrToUUID(uuid.New().String()) + return t.RegisterHostByUUID(infraEnvID, *uuid) +} + +func (t *SubsystemTestContext) RegisterHostByUUID(infraEnvID, hostID strfmt.UUID) *models.HostRegistrationResponse { + host, err := t.AgentBMClient.Installer.V2RegisterHost(context.Background(), &installer.V2RegisterHostParams{ + InfraEnvID: infraEnvID, + NewHostParams: &models.HostCreateParams{ + HostID: &hostID, + }, + }) + Expect(err).NotTo(HaveOccurred()) + return host.GetPayload() +} + +func (t *SubsystemTestContext) GenerateEssentialHostStepsWithInventory(ctx context.Context, h *models.Host, name string, inventory *models.Inventory) { + t.GenerateGetNextStepsWithTimestamp(ctx, h, time.Now().Unix()) + t.GenerateHWPostStepReply(ctx, h, inventory, name) + t.GenerateFAPostStepReply(ctx, h, ValidFreeAddresses) + 
t.GenerateNTPPostStepReply(ctx, h, []*models.NtpSource{common.TestNTPSourceSynced}) + t.GenerateDomainNameResolutionReply(ctx, h, *common.TestDomainNameResolutionsSuccess) +} + +func (t *SubsystemTestContext) GenerateGetNextStepsWithTimestamp(ctx context.Context, h *models.Host, timestamp int64) { + _, err := t.AgentBMClient.Installer.V2GetNextSteps(ctx, &installer.V2GetNextStepsParams{ + HostID: *h.ID, + InfraEnvID: h.InfraEnvID, + DiscoveryAgentVersion: swag.String("quay.io/edge-infrastructure/assisted-installer-agent:latest"), + Timestamp: ×tamp, + }) + Expect(err).ToNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GenerateHWPostStepReply(ctx context.Context, h *models.Host, hwInfo *models.Inventory, hostname string) { + hwInfo.Hostname = hostname + hw, err := json.Marshal(&hwInfo) + Expect(err).NotTo(HaveOccurred()) + _, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(hw), + StepID: string(models.StepTypeInventory), + StepType: models.StepTypeInventory, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GenerateFAPostStepReply(ctx context.Context, h *models.Host, freeAddresses models.FreeNetworksAddresses) { + fa, err := json.Marshal(&freeAddresses) + Expect(err).NotTo(HaveOccurred()) + _, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(fa), + StepID: string(models.StepTypeFreeNetworkAddresses), + StepType: models.StepTypeFreeNetworkAddresses, + }, + }) + Expect(err).To(BeNil()) +} + +func (t *SubsystemTestContext) GenerateNTPPostStepReply(ctx context.Context, h *models.Host, ntpSources []*models.NtpSource) { + response := models.NtpSynchronizationResponse{ + NtpSources: ntpSources, + } + + bytes, err := json.Marshal(&response) + 
Expect(err).NotTo(HaveOccurred())
+	_, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{
+		InfraEnvID: h.InfraEnvID,
+		HostID:     *h.ID,
+		Reply: &models.StepReply{
+			ExitCode: 0,
+			Output:   string(bytes),
+			StepID:   string(models.StepTypeNtpSynchronizer),
+			StepType: models.StepTypeNtpSynchronizer,
+		},
+	})
+	Expect(err).ShouldNot(HaveOccurred())
+}
+
+func (t *SubsystemTestContext) GenerateDomainNameResolutionReply(ctx context.Context, h *models.Host, domainNameResolution models.DomainResolutionResponse) {
+	dnsResolution, err := json.Marshal(&domainNameResolution)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{
+		InfraEnvID: h.InfraEnvID,
+		HostID:     *h.ID,
+		Reply: &models.StepReply{
+			ExitCode: 0,
+			Output:   string(dnsResolution),
+			StepID:   string(models.StepTypeDomainResolution),
+			StepType: models.StepTypeDomainResolution,
+		},
+	})
+	Expect(err).To(BeNil())
+}
+
+func (t *SubsystemTestContext) GenerateEssentialPrepareForInstallationSteps(ctx context.Context, hosts ...*models.Host) {
+	t.GenerateSuccessfulDiskSpeedResponses(ctx, SdbId, hosts...)
+ for _, h := range hosts { + t.GenerateContainerImageAvailabilityPostStepReply(ctx, h, []*models.ContainerImageAvailability{common.TestImageStatusesSuccess}) + } +} + +func (t *SubsystemTestContext) GenerateSuccessfulDiskSpeedResponses(ctx context.Context, path string, hosts ...*models.Host) { + for _, h := range hosts { + t.GenerateDiskSpeedChekResponse(ctx, h, path, 0) + } +} + +func (t *SubsystemTestContext) GenerateDiskSpeedChekResponse(ctx context.Context, h *models.Host, path string, exitCode int64) { + result := models.DiskSpeedCheckResponse{ + IoSyncDuration: 10, + Path: path, + } + b, err := json.Marshal(&result) + Expect(err).ToNot(HaveOccurred()) + _, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: exitCode, + Output: string(b), + StepID: string(models.StepTypeInstallationDiskSpeedCheck), + StepType: models.StepTypeInstallationDiskSpeedCheck, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GenerateContainerImageAvailabilityPostStepReply(ctx context.Context, h *models.Host, imageStatuses []*models.ContainerImageAvailability) { + response := models.ContainerImageAvailabilityResponse{ + Images: imageStatuses, + } + + bytes, err := json.Marshal(&response) + Expect(err).NotTo(HaveOccurred()) + _, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(bytes), + StepID: string(models.StepTypeContainerImageAvailability), + StepType: models.StepTypeContainerImageAvailability, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GenerateConnectivityPostStepReply(ctx context.Context, h *models.Host, connectivityReport *models.ConnectivityReport) { + fa, err := json.Marshal(connectivityReport) + Expect(err).NotTo(HaveOccurred()) + _, err 
= t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(fa), + StepID: string(models.StepTypeConnectivityCheck), + StepType: models.StepTypeConnectivityCheck, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GenerateVerifyVipsPostStepReply(ctx context.Context, h *models.Host, apiVips []string, ingressVips []string, verification models.VipVerification) { + response := models.VerifyVipsResponse{} + for _, vip := range apiVips { + response = append(response, &models.VerifiedVip{ + Verification: common.VipVerificationPtr(verification), + Vip: models.IP(vip), + VipType: models.VipTypeAPI, + }) + } + for _, vip := range ingressVips { + response = append(response, &models.VerifiedVip{ + Verification: common.VipVerificationPtr(verification), + Vip: models.IP(vip), + VipType: models.VipTypeIngress, + }) + } + bytes, jsonErr := json.Marshal(&response) + Expect(jsonErr).NotTo(HaveOccurred()) + _, err := t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + StepType: models.StepTypeVerifyVips, + Output: string(bytes), + StepID: string(models.StepTypeVerifyVips), + }, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GenerateDomainResolution(ctx context.Context, h *models.Host, name string, baseDomain string) { + reply := common.CreateWildcardDomainNameResolutionReply(name, baseDomain) + reply.Resolutions = append(reply.Resolutions, &models.DomainResolutionResponseDomain{ + DomainName: swag.String("quay.io"), + IPV4Addresses: []strfmt.IPv4{"7.8.9.11/24"}, + IPV6Addresses: []strfmt.IPv6{"1003:db8::11/120"}, + }) + t.GenerateDomainNameResolutionReply(ctx, h, *reply) +} + +func (t *SubsystemTestContext) RegisterNode(ctx context.Context, infraenvID strfmt.UUID, name, 
ip string) *models.Host { + h := &t.RegisterHost(infraenvID).Host + t.GenerateEssentialHostSteps(ctx, h, name, ip) + t.GenerateEssentialPrepareForInstallationSteps(ctx, h) + return h +} + +func (t *SubsystemTestContext) RegisterNodeWithInventory(ctx context.Context, infraEnvID strfmt.UUID, name, ip string, inventory *models.Inventory) *models.Host { + h := &t.RegisterHost(infraEnvID).Host + hwInfo := inventory + hwInfo.Interfaces[0].IPV4Addresses = []string{ip} + t.GenerateEssentialHostStepsWithInventory(ctx, h, name, hwInfo) + t.GenerateEssentialPrepareForInstallationSteps(ctx, h) + return h +} + +func (t *SubsystemTestContext) GenerateEssentialHostSteps(ctx context.Context, h *models.Host, name, cidr string) { + t.GenerateEssentialHostStepsWithInventory(ctx, h, name, GetDefaultInventory(cidr)) +} + +func (t *SubsystemTestContext) GenerateFullMeshConnectivity(ctx context.Context, startCIDR string, hosts ...*models.Host) { + + ip, _, err := net.ParseCIDR(startCIDR) + Expect(err).NotTo(HaveOccurred()) + hostToAddr := make(map[strfmt.UUID]string) + + for _, h := range hosts { + hostToAddr[*h.ID] = ip.String() + common.IncrementIP(ip) + } + + var connectivityReport models.ConnectivityReport + for _, h := range hosts { + + l2Connectivity := make([]*models.L2Connectivity, 0) + l3Connectivity := make([]*models.L3Connectivity, 0) + for id, addr := range hostToAddr { + + if id != *h.ID { + continue + } + + l2Connectivity = append(l2Connectivity, &models.L2Connectivity{ + RemoteIPAddress: addr, + Successful: true, + }) + l3Connectivity = append(l3Connectivity, &models.L3Connectivity{ + RemoteIPAddress: addr, + Successful: true, + }) + } + + connectivityReport.RemoteHosts = append(connectivityReport.RemoteHosts, &models.ConnectivityRemoteHost{ + HostID: *h.ID, + L2Connectivity: l2Connectivity, + L3Connectivity: l3Connectivity, + }) + } + + for _, h := range hosts { + t.GenerateConnectivityPostStepReply(ctx, h, &connectivityReport) + } +} + +func (t *SubsystemTestContext) 
GenerateCommonDomainReply(ctx context.Context, h *models.Host, clusterName, baseDomain string) { + fqdn := func(domainPrefix, clusterName, baseDomain string) *string { + return swag.String(fmt.Sprintf("%s.%s.%s", domainPrefix, clusterName, baseDomain)) + } + var domainResolutions = []*models.DomainResolutionResponseDomain{ + { + DomainName: fqdn(constants.APIClusterSubdomain, clusterName, baseDomain), + IPV4Addresses: []strfmt.IPv4{"1.2.3.4/24"}, + IPV6Addresses: []strfmt.IPv6{"1001:db8::10/120"}, + }, + { + DomainName: fqdn(constants.InternalAPIClusterSubdomain, clusterName, baseDomain), + IPV4Addresses: []strfmt.IPv4{"4.5.6.7/24"}, + IPV6Addresses: []strfmt.IPv6{"1002:db8::10/120"}, + }, + { + DomainName: fqdn(constants.AppsSubDomainNameHostDNSValidation+".apps", clusterName, baseDomain), + IPV4Addresses: []strfmt.IPv4{"7.8.9.10/24"}, + IPV6Addresses: []strfmt.IPv6{"1003:db8::10/120"}, + }, + { + DomainName: swag.String("quay.io"), + IPV4Addresses: []strfmt.IPv4{"7.8.9.11/24"}, + IPV6Addresses: []strfmt.IPv6{"1003:db8::11/120"}, + }, + { + DomainName: fqdn(constants.DNSWildcardFalseDomainName, clusterName, baseDomain), + IPV4Addresses: []strfmt.IPv4{}, + IPV6Addresses: []strfmt.IPv6{}, + }, + { + DomainName: fqdn(constants.DNSWildcardFalseDomainName, clusterName, baseDomain+"."), + IPV4Addresses: []strfmt.IPv4{}, + IPV6Addresses: []strfmt.IPv6{}, + }, + } + var domainResolutionResponse = models.DomainResolutionResponse{ + Resolutions: domainResolutions, + } + t.GenerateDomainNameResolutionReply(ctx, h, domainResolutionResponse) +} + +func (t *SubsystemTestContext) CompleteInstallation(clusterID strfmt.UUID) { + ctx := context.Background() + rep, err := t.AgentBMClient.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + + status := models.OperatorStatusAvailable + + Eventually(func() error { + _, err = t.AgentBMClient.Installer.V2UploadClusterIngressCert(ctx, 
&installer.V2UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: models.IngressCertParams(IngressCa), + }) + return err + }, "10s", "2s").Should(BeNil()) + + for _, operator := range rep.Payload.MonitoredOperators { + if operator.OperatorType != models.OperatorTypeBuiltin { + continue + } + + t.V2ReportMonitoredOperatorStatus(ctx, clusterID, operator.Name, status, "") + } +} + +func (t *SubsystemTestContext) V2ReportMonitoredOperatorStatus(ctx context.Context, clusterID strfmt.UUID, opName string, opStatus models.OperatorStatus, opVersion string) { + _, err := t.AgentBMClient.Operators.V2ReportMonitoredOperatorStatus(ctx, &operatorsClient.V2ReportMonitoredOperatorStatusParams{ + ClusterID: clusterID, + ReportParams: &models.OperatorMonitorReport{ + Name: opName, + Status: opStatus, + StatusInfo: string(opStatus), + Version: opVersion, + }, + }) + Expect(err).NotTo(HaveOccurred()) +} + +func (t *SubsystemTestContext) UpdateProgress(hostID strfmt.UUID, infraEnvID strfmt.UUID, current_step models.HostStage) { + t.UpdateHostProgressWithInfo(hostID, infraEnvID, current_step, "") +} + +func (t *SubsystemTestContext) UpdateHostProgressWithInfo(hostID strfmt.UUID, infraEnvID strfmt.UUID, current_step models.HostStage, info string) { + ctx := context.Background() + + installProgress := &models.HostProgress{ + CurrentStage: current_step, + ProgressInfo: info, + } + updateReply, err := t.AgentBMClient.Installer.V2UpdateHostInstallProgress(ctx, &installer.V2UpdateHostInstallProgressParams{ + InfraEnvID: infraEnvID, + HostProgress: installProgress, + HostID: hostID, + }) + Expect(err).ShouldNot(HaveOccurred()) + Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewV2UpdateHostInstallProgressOK())) +} + +func (t *SubsystemTestContext) GenerateApiVipPostStepReply(ctx context.Context, h *models.Host, cluster *models.Cluster, success bool) { + checkVipApiResponse := models.APIVipConnectivityResponse{ + IsSuccess: success, + } + if cluster != nil && 
swag.StringValue(cluster.Status) == models.ClusterStatusAddingHosts { + checkVipApiResponse.Ignition = `{ + "ignition": { + "config": {}, + "version": "3.2.0" + }, + "storage": { + "files": [] + } + }` + } + bytes, jsonErr := json.Marshal(checkVipApiResponse) + Expect(jsonErr).NotTo(HaveOccurred()) + _, err := t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + StepType: models.StepTypeAPIVipConnectivityCheck, + Output: string(bytes), + StepID: "apivip-connectivity-check-step", + }, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GetDefaultNutanixInventory(cidr string) *models.Inventory { + nutanixInventory := *GetDefaultInventory(cidr) + nutanixInventory.SystemVendor = &models.SystemVendor{Manufacturer: "Nutanix", ProductName: "AHV", Virtual: true, SerialNumber: "3534"} + nutanixInventory.Disks = []*models.Disk{&Vma, &Vmremovable} + return &nutanixInventory +} + +func (t *SubsystemTestContext) GetDefaultExternalInventory(cidr string) *models.Inventory { + externalInventory := *GetDefaultInventory(cidr) + externalInventory.SystemVendor = &models.SystemVendor{Manufacturer: "OracleCloud.com", ProductName: "OCI", Virtual: true, SerialNumber: "3534"} + externalInventory.Disks = []*models.Disk{&Vma, &Vmremovable} + return &externalInventory +} + +func (t *SubsystemTestContext) BindHost(infraEnvID, hostID, clusterID strfmt.UUID) *models.Host { + host, err := t.UserBMClient.Installer.BindHost(context.Background(), &installer.BindHostParams{ + HostID: hostID, + InfraEnvID: infraEnvID, + BindHostParams: &models.BindHostParams{ + ClusterID: &clusterID, + }, + }) + Expect(err).NotTo(HaveOccurred()) + return host.GetPayload() +} + +func (t *SubsystemTestContext) UnbindHost(infraEnvID, hostID strfmt.UUID) *models.Host { + host, err := t.UserBMClient.Installer.UnbindHost(context.Background(), &installer.UnbindHostParams{ + 
HostID: hostID, + InfraEnvID: infraEnvID, + }) + Expect(err).NotTo(HaveOccurred()) + return host.GetPayload() +} + +func (t *SubsystemTestContext) GetHostV2(infraEnvID, hostID strfmt.UUID) *models.Host { + host, err := t.UserBMClient.Installer.V2GetHost(context.Background(), &installer.V2GetHostParams{ + InfraEnvID: infraEnvID, + HostID: hostID, + }) + Expect(err).NotTo(HaveOccurred()) + return host.GetPayload() +} + +func (t *SubsystemTestContext) GetCluster(clusterID strfmt.UUID) *models.Cluster { + cluster, err := t.UserBMClient.Installer.V2GetCluster(context.Background(), &installer.V2GetClusterParams{ + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + return cluster.GetPayload() +} + +func (t *SubsystemTestContext) GetCommonCluster(ctx context.Context, clusterID strfmt.UUID) *common.Cluster { + var cluster common.Cluster + err := t.db.First(&cluster, "id = ?", clusterID).Error + Expect(err).ShouldNot(HaveOccurred()) + return &cluster +} + +func (t *SubsystemTestContext) WaitForLastInstallationCompletionStatus(clusterID strfmt.UUID, status string) { + waitFunc := func(ctx context.Context) (bool, error) { + c := t.GetCommonCluster(ctx, clusterID) + return c.LastInstallationPreparation.Status == status, nil + } + err := wait.PollUntilContextTimeout(context.Background(), t.pollDefaultInterval, t.pollDefaultTimeout, false, waitFunc) + Expect(err).NotTo(HaveOccurred()) +} + +func (t *SubsystemTestContext) GetNextSteps(infraEnvID, hostID strfmt.UUID) models.Steps { + steps, err := t.AgentBMClient.Installer.V2GetNextSteps(context.Background(), &installer.V2GetNextStepsParams{ + InfraEnvID: infraEnvID, + HostID: hostID, + DiscoveryAgentVersion: swag.String("quay.io/edge-infrastructure/assisted-installer-agent:latest"), + }) + Expect(err).NotTo(HaveOccurred()) + return *steps.GetPayload() +} + +func (t *SubsystemTestContext) UpdateClusterLogProgress(clusterID strfmt.UUID, progress models.LogsState) { + ctx := context.Background() + + updateReply, err := 
t.AgentBMClient.Installer.V2UpdateClusterLogsProgress(ctx, &installer.V2UpdateClusterLogsProgressParams{ + ClusterID: clusterID, + LogsProgressParams: &models.LogsProgressParams{ + LogsState: common.LogStatePtr(progress), + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewV2UpdateClusterLogsProgressNoContent())) +} + +func (t *SubsystemTestContext) UpdateHostLogProgress(infraEnvID strfmt.UUID, hostID strfmt.UUID, progress models.LogsState) { + ctx := context.Background() + + updateReply, err := t.AgentBMClient.Installer.V2UpdateHostLogsProgress(ctx, &installer.V2UpdateHostLogsProgressParams{ + InfraEnvID: infraEnvID, + HostID: hostID, + LogsProgressParams: &models.LogsProgressParams{ + LogsState: common.LogStatePtr(progress), + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewV2UpdateHostLogsProgressNoContent())) +} + +func (t *SubsystemTestContext) GenerateConnectivityCheckPostStepReply(ctx context.Context, h *models.Host, targetCIDR string, success bool) { + targetIP, _, err := net.ParseCIDR(targetCIDR) + Expect(err).NotTo(HaveOccurred()) + response := models.ConnectivityReport{ + RemoteHosts: []*models.ConnectivityRemoteHost{ + {L3Connectivity: []*models.L3Connectivity{{RemoteIPAddress: targetIP.String(), Successful: success}}}, + }, + } + bytes, err := json.Marshal(&response) + Expect(err).NotTo(HaveOccurred()) + _, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(bytes), + StepID: string(models.StepTypeConnectivityCheck), + StepType: models.StepTypeConnectivityCheck, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) GenerateTangPostStepReply(ctx context.Context, success bool, hosts ...*models.Host) { + response := models.TangConnectivityResponse{ + IsSuccess: 
false, + TangServerResponse: nil, + } + + if success { + tangResponse := getTangResponse("http://tang.example.com:7500") + response = models.TangConnectivityResponse{ + IsSuccess: true, + TangServerResponse: []*models.TangServerResponse{&tangResponse}, + } + } + + bytes, err := json.Marshal(&response) + Expect(err).NotTo(HaveOccurred()) + + for _, h := range hosts { + _, err = t.AgentBMClient.Installer.V2PostStepReply(ctx, &installer.V2PostStepReplyParams{ + InfraEnvID: h.InfraEnvID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(bytes), + StepID: string(models.StepTypeTangConnectivityCheck), + StepType: models.StepTypeTangConnectivityCheck, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + } +} + +func (t *SubsystemTestContext) GenerateFailedDiskSpeedResponses(ctx context.Context, path string, hosts ...*models.Host) { + for _, h := range hosts { + t.GenerateDiskSpeedChekResponse(ctx, h, path, -1) + } +} + +func (t *SubsystemTestContext) UpdateVipParams(ctx context.Context, clusterID strfmt.UUID) { + apiVip := "1.2.3.5" + ingressVip := "1.2.3.6" + _, err := t.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + ClusterUpdateParams: &models.V2ClusterUpdateParams{ + VipDhcpAllocation: swag.Bool(false), + APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, + IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), ClusterID: clusterID}}, + }, + ClusterID: clusterID, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) V2UpdateVipParams(ctx context.Context, clusterID strfmt.UUID) { + apiVip := "1.2.3.5" + ingressVip := "1.2.3.6" + _, err := t.UserBMClient.Installer.V2UpdateCluster(ctx, &installer.V2UpdateClusterParams{ + ClusterUpdateParams: &models.V2ClusterUpdateParams{ + VipDhcpAllocation: swag.Bool(false), + APIVips: []*models.APIVip{{IP: models.IP(apiVip), ClusterID: clusterID}}, + IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip), 
ClusterID: clusterID}}, + }, + ClusterID: clusterID, + }) + Expect(err).ShouldNot(HaveOccurred()) +} + +func (t *SubsystemTestContext) Register3nodes(ctx context.Context, clusterID, infraenvID strfmt.UUID, cidr string) ([]*models.Host, []string) { + ips := hostutil.GenerateIPv4Addresses(3, cidr) + h1 := t.RegisterNode(ctx, infraenvID, "h1", ips[0]) + h2 := t.RegisterNode(ctx, infraenvID, "h2", ips[1]) + h3 := t.RegisterNode(ctx, infraenvID, "h3", ips[2]) + t.UpdateVipParams(ctx, clusterID) + t.GenerateFullMeshConnectivity(ctx, ips[0], h1, h2, h3) + + return []*models.Host{h1, h2, h3}, ips +} + +func (t *SubsystemTestContext) GetDefaultVmwareInventory(cidr string) *models.Inventory { + vmwareInventory := *GetDefaultInventory(cidr) + vmwareInventory.SystemVendor = &models.SystemVendor{Manufacturer: "VMware, Inc.", ProductName: "VMware Virtual", Virtual: true, SerialNumber: "3534"} + vmwareInventory.Disks = []*models.Disk{&Vma, &Vmremovable} + return &vmwareInventory +} + +func (t *SubsystemTestContext) RegisterCluster(ctx context.Context, client *client.AssistedInstall, clusterName string, pullSecret string) (strfmt.UUID, error) { + var cluster, err = client.Installer.V2RegisterCluster(ctx, &installer.V2RegisterClusterParams{ + NewClusterParams: &models.ClusterCreateParams{ + Name: swag.String(clusterName), + OpenshiftVersion: swag.String(t.vipAutoAllocOpenshiftVersion), + PullSecret: swag.String(pullSecret), + BaseDNSDomain: "example.com", + VipDhcpAllocation: swag.Bool(true), + }, + }) + if err != nil { + return "", err + } + return *cluster.GetPayload().ID, nil +} + +func (t *SubsystemTestContext) DeregisterResources() { + var multiErr *multierror.Error + + reply, err := t.UserBMClient.Installer.V2ListClusters(context.Background(), &installer.V2ListClustersParams{}) + if err != nil { + t.log.WithError(err).Error("Failed to list clusters") + return + } + + if GinkgoT().Failed() { + // Dump cluster info on failure + multiErr = multierror.Append(multiErr, 
GinkgoResourceLogger(models.ClusterKindCluster, reply.Payload)) + } + + infraEnvReply, err := t.UserBMClient.Installer.ListInfraEnvs(context.Background(), &installer.ListInfraEnvsParams{}) + if err != nil { + t.log.WithError(err).Error("Failed to list infra-envs") + } + + if GinkgoT().Failed() { + // Dump infra-env info on failure + multiErr = multierror.Append(multiErr, GinkgoResourceLogger(models.InfraEnvKindInfraEnv, infraEnvReply.Payload)) + } + + for _, i := range infraEnvReply.GetPayload() { + if GinkgoT().Failed() { + hostReply, err1 := t.UserBMClient.Installer.V2ListHosts(context.Background(), &installer.V2ListHostsParams{InfraEnvID: *i.ID}) + if err1 != nil { + t.log.WithError(err).Errorf("Failed to list infra-env %s (%s) hosts", i.ID, *i.Name) + } + // Dump host info on failure + multiErr = multierror.Append(multiErr, GinkgoResourceLogger(models.HostKindHost, hostReply.Payload)) + } + if _, err = t.UserBMClient.Installer.DeregisterInfraEnv(context.Background(), &installer.DeregisterInfraEnvParams{InfraEnvID: *i.ID}); err != nil { + t.log.WithError(err).Debugf("InfraEnv %s couldn't be deleted via REST API", i.ID) + } + } + + for _, c := range reply.GetPayload() { + if _, err = t.UserBMClient.Installer.V2DeregisterCluster(context.Background(), &installer.V2DeregisterClusterParams{ClusterID: *c.ID}); err != nil { + t.log.WithError(err).Debugf("Cluster %s couldn't be deleted via REST API", *c.ID) + } + } + + if multiErr.ErrorOrNil() != nil { + t.log.WithError(err).Error("At-least one error occured during deregister cleanup") + } +} + +func (t *SubsystemTestContext) ClearDB() { + // Clean the DB to make sure we start tests from scratch + for _, model := range []interface{}{ + &models.Host{}, + &models.Cluster{}, + &models.InfraEnv{}, + &models.Event{}, + &models.MonitoredOperator{}, + &models.ClusterNetwork{}, + &models.ServiceNetwork{}, + &models.MachineNetwork{}, + } { + t.db.Session(&gorm.Session{AllowGlobalUpdate: true}).Unscoped().Delete(model) + } +} 
diff --git a/subsystem/utils_test/utils.go b/subsystem/utils_test/utils.go new file mode 100644 index 00000000000..42e7664c109 --- /dev/null +++ b/subsystem/utils_test/utils.go @@ -0,0 +1,245 @@ +package utils_test + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/alecthomas/units" + "github.com/go-openapi/strfmt" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/openshift/assisted-service/internal/common" + usageMgr "github.com/openshift/assisted-service/internal/usage" + "github.com/openshift/assisted-service/models" +) + +const ( + PullSecretName = "pull-secret" + validDiskSize = int64(128849018880) + loop0Id = "wwn-0x1111111111111111111111" + SdbId = "wwn-0x2222222222222222222222" + DefaultCIDRv6 = "1001:db8::10/120" + DefaultCIDRv4 = "1.2.3.10/24" + SshPublicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC50TuHS7aYci+U+5PLe/aW/I6maBi9PBDucLje6C6gtArfjy7udWA1DCSIQd+DkHhi57/s+PmvEjzfAfzqo+L+/8/O2l2seR1pPhHDxMR/rSyo/6rZP6KIL8HwFqXHHpDUM4tLXdgwKAe1LxBevLt/yNl8kOiHJESUSl+2QSf8z4SIbo/frDD8OwOvtfKBEG4WCb8zEsEuIPNF/Vo/UxPtS9pPTecEsWKDHR67yFjjamoyLvAzMAJotYgyMoxm8PTyCgEzHk3s3S4iO956d6KVOEJVXnTVhAxrtLuubjskd7N4hVN7h2s4Z584wYLKYhrIBL0EViihOMzY4mH3YE4KZusfIx6oMcggKX9b3NHm0la7cj2zg0r6zjUn6ZCP4gXM99e5q4auc0OEfoSfQwofGi3WmxkG3tEozCB8Zz0wGbi2CzR8zlcF+BNV5I2LESlLzjPY5B4dvv5zjxsYoz94p3rUhKnnPM2zTx1kkilDK5C5fC1k9l/I/r5Qk4ebLQU= oscohen@localhost.localdomain" + IngressCa = "-----BEGIN CERTIFICATE-----\nMIIDozCCAougAwIBAgIULCOqWTF" + + "aEA8gNEmV+rb7h1v0r3EwDQYJKoZIhvcNAQELBQAwYTELMAkGA1UEBhMCaXMxCzAJBgNVBAgMAmRk" + + "MQswCQYDVQQHDAJkZDELMAkGA1UECgwCZGQxCzAJBgNVBAsMAmRkMQswCQYDVQQDDAJkZDERMA8GCSqGSIb3DQEJARYCZGQwHhcNMjAwNTI1MTYwNTAwWhcNMzA" + + "wNTIzMTYwNTAwWjBhMQswCQYDVQQGEwJpczELMAkGA1UECAwCZGQxCzAJBgNVBAcMAmRkMQswCQYDVQQKDAJkZDELMAkGA1UECwwCZGQxCzAJBgNVBAMMAmRkMREwDwYJKoZIh" + + "vcNAQkBFgJkZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAML63CXkBb+lvrJKfdfYBHLDYfuaC6exCSqASUAosJWWrfyDiDMUbmfs06PLKyv7N8efDhza74ov0EQJ" + + 
"NRhMNaCE+A0ceq6ZXmmMswUYFdLAy8K2VMz5mroBFX8sj5PWVr6rDJ2ckBaFKWBB8NFmiK7MTWSIF9n8M107/9a0QURCvThUYu+sguzbsLODFtXUxG5rtTVKBVcPZvEfRky2Tkt4AySFS" + + "mkO6Kf4sBd7MC4mKWZm7K8k7HrZYz2usSpbrEtYGtr6MmN9hci+/ITDPE291DFkzIcDCF493v/3T+7XsnmQajh6kuI+bjIaACfo8N+twEoJf/N1PmphAQdEiC0CAwEAAaNTMFEwHQYDVR0O" + + "BBYEFNvmSprQQ2HUUtPxs6UOuxq9lKKpMB8GA1UdIwQYMBaAFNvmSprQQ2HUUtPxs6UOuxq9lKKpMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJEWxnxtQV5IqPVRr2SM" + + "WNNxcJ7A/wyet39l5VhHjbrQGynk5WS80psn/riLUfIvtzYMWC0IR0pIMQuMDF5sNcKp4D8Xnrd+Bl/4/Iy/iTOoHlw+sPkKv+NL2XR3iO8bSDwjtjvd6L5NkUuzsRoSkQCG2fHASqqgFoyV9Ld" + + "RsQa1w9ZGebtEWLuGsrJtR7gaFECqJnDbb0aPUMixmpMHID8kt154TrLhVFmMEqGGC1GvZVlQ9Of3GP9y7X4vDpHshdlWotOnYKHaeu2d5cRVFHhEbrslkISgh/TRuyl7VIpnjOYUwMBpCiVH6M" + + "2lyDI6UR3Fbz4pVVAxGXnVhBExjBE=\n-----END CERTIFICATE-----" + DefaultWaitForHostStateTimeout = 20 * time.Second + DefaultWaitForClusterStateTimeout = 40 * time.Second + DefaultWaitForMachineNetworkCIDRTimeout = 40 * time.Second +) + +var Options struct { + Namespace string `envconfig:"NAMESPACE" default:"assisted-installer"` + EnableKubeAPI bool `envconfig:"ENABLE_KUBE_API" default:"false"` +} + +var ( + TestContext *SubsystemTestContext + loop0 = models.Disk{ + ID: loop0Id, + ByID: loop0Id, + DriveType: "SSD", + Name: "loop0", + SizeBytes: validDiskSize, + } + + Sdb = models.Disk{ + ID: SdbId, + ByID: SdbId, + DriveType: "HDD", + Name: "sdb", + SizeBytes: validDiskSize, + } + + Vma = models.Disk{ + ID: SdbId, + ByID: SdbId, + DriveType: "HDD", + Name: "vma", + HasUUID: true, + Vendor: "VMware", + SizeBytes: validDiskSize, + } + + Vmremovable = models.Disk{ + ID: loop0Id, + ByID: loop0Id, + DriveType: "0DD", + Name: "sr0", + Removable: true, + SizeBytes: 106516480, + } + + ValidHwInfoV6 = &models.Inventory{ + CPU: &models.CPU{Count: 16}, + Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB), UsableBytes: int64(32 * units.GiB)}, + Disks: []*models.Disk{&loop0, &Sdb}, + Interfaces: []*models.Interface{ + { + 
IPV6Addresses: []string{ + DefaultCIDRv6, + }, + Type: "physical", + }, + }, + SystemVendor: &models.SystemVendor{Manufacturer: "manu", ProductName: "prod", SerialNumber: "3534"}, + Routes: common.TestDefaultRouteConfiguration, + } + ValidHwInfo = &models.Inventory{ + CPU: &models.CPU{Count: 16, Architecture: "x86_64"}, + Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB), UsableBytes: int64(32 * units.GiB)}, + Disks: []*models.Disk{&loop0, &Sdb}, + Interfaces: []*models.Interface{ + { + IPV4Addresses: []string{ + DefaultCIDRv4, + }, + MacAddress: "e6:53:3d:a7:77:b4", + Type: "physical", + }, + }, + SystemVendor: &models.SystemVendor{Manufacturer: "manu", ProductName: "prod", SerialNumber: "3534"}, + Routes: common.TestDefaultRouteConfiguration, + TpmVersion: models.InventoryTpmVersionNr20, + } + ValidFreeAddresses = models.FreeNetworksAddresses{ + { + Network: "1.2.3.0/24", + FreeAddresses: []strfmt.IPv4{ + "1.2.3.8", + "1.2.3.9", + "1.2.3.5", + "1.2.3.6", + "1.2.3.100", + "1.2.3.101", + "1.2.3.102", + "1.2.3.103", + }, + }, + } +) + +func GinkgoLogger(s string) { + _, _ = GinkgoWriter.Write([]byte(fmt.Sprintln(s))) +} + +func GinkgoResourceLogger(kind string, resources interface{}) error { + resList, err := json.MarshalIndent(resources, "", " ") + if err != nil { + return err + } + GinkgoLogger(fmt.Sprintf("The failed test '%s' created the following %s resources:", GinkgoT().Name(), kind)) + GinkgoLogger(string(resList)) + return nil +} + +func StrToUUID(s string) *strfmt.UUID { + u := strfmt.UUID(s) + return &u +} + +func IsJSON(s []byte) bool { + var js map[string]interface{} + return json.Unmarshal(s, &js) == nil + +} + +func getTangResponse(url string) models.TangServerResponse { + return models.TangServerResponse{ + TangURL: url, + Payload: "some_fake_payload", + Signatures: []*models.TangServerSignatures{ + { + Signature: "some_fake_signature1", + Protected: "foobar1", + }, + { + Signature: "some_fake_signature2", + Protected: "foobar2", + }, + }, 
+ } +} + +func GetDefaultInventory(cidr string) *models.Inventory { + hwInfo := ValidHwInfo + hwInfo.Interfaces[0].IPV4Addresses = []string{cidr} + return hwInfo +} + +func GetDefaultNutanixInventory(cidr string) *models.Inventory { + nutanixInventory := *GetDefaultInventory(cidr) + nutanixInventory.SystemVendor = &models.SystemVendor{Manufacturer: "Nutanix", ProductName: "AHV", Virtual: true, SerialNumber: "3534"} + nutanixInventory.Disks = []*models.Disk{&Vma, &Vmremovable} + return &nutanixInventory +} + +func GetDefaultExternalInventory(cidr string) *models.Inventory { + externalInventory := *GetDefaultInventory(cidr) + externalInventory.SystemVendor = &models.SystemVendor{Manufacturer: "OracleCloud.com", ProductName: "OCI", Virtual: true, SerialNumber: "3534"} + externalInventory.Disks = []*models.Disk{&Vma, &Vmremovable} + return &externalInventory +} + +func GetDefaultVmwareInventory(cidr string) *models.Inventory { + vmwareInventory := *GetDefaultInventory(cidr) + vmwareInventory.SystemVendor = &models.SystemVendor{Manufacturer: "VMware, Inc.", ProductName: "VMware Virtual", Virtual: true, SerialNumber: "3534"} + vmwareInventory.Disks = []*models.Disk{&Vma, &Vmremovable} + return &vmwareInventory +} + +func IsStepTypeInList(steps models.Steps, sType models.StepType) bool { + for _, step := range steps.Instructions { + if step.StepType == sType { + return true + } + } + return false +} + +func AreStepsInList(steps models.Steps, stepTypes []models.StepType) { + for _, stepType := range stepTypes { + Expect(IsStepTypeInList(steps, stepType)).Should(BeTrue()) + } +} + +func GetStepFromListByStepType(steps models.Steps, sType models.StepType) *models.Step { + for _, step := range steps.Instructions { + if step.StepType == sType { + return step + } + } + return nil +} + +func VerifyUsageSet(featureUsage string, candidates ...models.Usage) { + usages := make(map[string]models.Usage) + err := json.Unmarshal([]byte(featureUsage), &usages) + 
Expect(err).NotTo(HaveOccurred()) + for _, usage := range candidates { + usage.ID = usageMgr.UsageNameToID(usage.Name) + Expect(usages[usage.Name]).To(Equal(usage)) + } +} + +func VerifyUsageNotSet(featureUsage string, features ...string) { + usages := make(map[string]*models.Usage) + err := json.Unmarshal([]byte(featureUsage), &usages) + Expect(err).NotTo(HaveOccurred()) + for _, name := range features { + Expect(usages[name]).To(BeNil()) + } +} + + + diff --git a/subsystem/utils_test/wiremock_stubs.go b/subsystem/utils_test/wiremock_stubs.go new file mode 100644 index 00000000000..3f9fe1afa84 --- /dev/null +++ b/subsystem/utils_test/wiremock_stubs.go @@ -0,0 +1,887 @@ +package utils_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strconv" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/openshift/assisted-service/internal/common" + "github.com/openshift/assisted-service/internal/releasesources" + "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/pkg/ocm" +) + +type StubDefinition struct { + Request *RequestDefinition `json:"request"` + Response *ResponseDefinition `json:"response"` +} + +type RequestDefinition struct { + URL string `json:"url"` + Method string `json:"method"` + BodyPatterns []map[string]string `json:"bodyPatterns"` + Headers map[string]string `json:"headers"` +} + +type ResponseDefinition struct { + Status int `json:"status"` + Body string `json:"body"` + Headers map[string]string `json:"headers"` +} + +type Mapping struct { + ID string +} + +type WireMock struct { + OCMHost string + TestToken string + ReleaseSources models.ReleaseSources +} + +type subscription struct { + ID strfmt.UUID `json:"id"` + Status string `json:"status"` +} + +const ( + wiremockMappingsPath string = "/__admin/mappings" + capabilityReviewPath string = "/api/authorizations/v1/capability_review" + accessReviewPath string = 
"/api/authorizations/v1/access_review" + pullAuthPath string = "/api/accounts_mgmt/v1/token_authorization" + clusterAuthzPath string = "/api/accounts_mgmt/v1/cluster_authorizations" + subscriptionPrefix string = "/api/accounts_mgmt/v1/subscriptions/" + accountsMgmtSearchPrefix string = "/api/accounts_mgmt/v1/accounts?search=username" + subscriptionUpdateOpenshiftClusterID string = "subscription_update_openshift_cluster_id" + subscriptionUpdateStatusActive string = "subscription_update_status_active" + subscriptionUpdateDisplayName string = "subscription_update_display_name" + subscriptionUpdateConsoleUrl string = "subscription_update_console_url" + tokenPath string = "/token" + fakePayloadUsername string = "jdoe123@example.com" + fakePayloadUsername2 string = "bob@example.com" + fakePayloadAdmin string = "admin@example.com" + fakePayloadUnallowedUser string = "unallowed@example.com" + fakePayloadClusterEditor string = "alice@example.com" + FakePS string = "dXNlcjpwYXNzd29yZAo=" + FakePS2 string = "dXNlcjI6cGFzc3dvcmQK" + FakePS3 string = "dXNlcjM6cGFzc3dvcmQ=" + FakeAdminPS string = "dXNlcjpwYXNzd29yZAy=" + WrongPullSecret string = "wrong_secret" + OrgId1 string = "1010101" + OrgId2 string = "2020202" + FakeSubscriptionID strfmt.UUID = "1h89fvtqeelulpo0fl5oddngj2ao7tt8" +) + +var ( + subscriptionPath string = filepath.Join(subscriptionPrefix, FakeSubscriptionID.String()) +) + +func (w *WireMock) CreateWiremockStubsForOCM() error { + if err := w.createStubsForAccessReview(); err != nil { + return err + } + + if err := w.createStubsForCapabilityReview(); err != nil { + return err + } + + if err := w.createStubsForClusterEditor(); err != nil { + return err + } + + if _, err := w.createStubTokenAuth(FakePS, fakePayloadUsername); err != nil { + return err + } + + if _, err := w.createStubTokenAuth(FakePS2, fakePayloadUsername2); err != nil { + return err + } + + if _, err := w.createStubTokenAuth(FakePS3, fakePayloadClusterEditor); err != nil { + return err + } + + if 
_, err := w.createStubTokenAuth(FakeAdminPS, fakePayloadAdmin); err != nil { + return err + } + + if _, err := w.createStubToken(w.TestToken); err != nil { + return err + } + + if err := w.createStubsForCreatingAMSSubscription(http.StatusOK); err != nil { + return err + } + + if err := w.createStubsForGettingAMSSubscription(http.StatusOK, ocm.SubscriptionStatusReserved); err != nil { + return err + } + + if err := w.createStubsForUpdatingAMSSubscription(http.StatusOK, subscriptionUpdateDisplayName); err != nil { + return err + } + + if err := w.createStubsForUpdatingAMSSubscription(http.StatusOK, subscriptionUpdateConsoleUrl); err != nil { + return err + } + + if err := w.createStubsForUpdatingAMSSubscription(http.StatusOK, subscriptionUpdateOpenshiftClusterID); err != nil { + return err + } + + if err := w.createStubsForUpdatingAMSSubscription(http.StatusOK, subscriptionUpdateStatusActive); err != nil { + return err + } + + if err := w.createStubsForDeletingAMSSubscription(http.StatusOK); err != nil { + return err + } + + if _, err := w.createOpenshiftUpdateServiceStubs(); err != nil { + return err + } + + return nil +} + +func (w *WireMock) createStubsForClusterEditor() error { + if _, err := w.createStubClusterEditorRequest(fakePayloadUsername, + FakeSubscriptionID.String(), "update", false); err != nil { + return err + } + if _, err := w.createStubClusterEditorRequest(fakePayloadUsername, + FakeSubscriptionID.String(), "delete", false); err != nil { + return err + } + if _, err := w.createStubClusterEditorRequest(fakePayloadUsername2, + FakeSubscriptionID.String(), "update", false); err != nil { + return err + } + if _, err := w.createStubClusterEditorRequest(fakePayloadUsername2, + FakeSubscriptionID.String(), "delete", false); err != nil { + return err + } + if _, err := w.createStubClusterEditorRequest(fakePayloadClusterEditor, + FakeSubscriptionID.String(), "update", true); err != nil { + return err + } + if _, err := 
w.createStubClusterEditorRequest(fakePayloadClusterEditor, + FakeSubscriptionID.String(), "delete", true); err != nil { + return err + } + return nil +} + +func (w *WireMock) createStubsForAccessReview() error { + if _, err := w.createStubAccessReview(fakePayloadUsername, true); err != nil { + return err + } + if _, err := w.createStubAccessReview(fakePayloadUsername2, true); err != nil { + return err + } + if _, err := w.createStubAccessReview(fakePayloadClusterEditor, true); err != nil { + return err + } + return nil +} + +func (w *WireMock) createStubsForCapabilityReview() error { + if _, err := w.createStubBareMetalCapabilityReview(fakePayloadUsername, false); err != nil { + return err + } + if _, err := w.createStubBareMetalCapabilityReview(fakePayloadUsername2, false); err != nil { + return err + } + if _, err := w.createStubBareMetalCapabilityReview(fakePayloadClusterEditor, false); err != nil { + return err + } + if _, err := w.createStubMultiarchCapabilityReview(fakePayloadUsername, OrgId1, false); err != nil { + return err + } + if _, err := w.createStubMultiarchCapabilityReview(fakePayloadUsername2, OrgId2, true); err != nil { + return err + } + if _, err := w.createStubIgnoreValidationsCapabilityReview(fakePayloadUsername, OrgId1, false); err != nil { + return err + } + if _, err := w.createStubIgnoreValidationsCapabilityReview(fakePayloadUsername2, OrgId2, true); err != nil { + return err + } + if _, err := w.createStubAccountsMgmt(fakePayloadUsername, OrgId1); err != nil { + return err + } + if _, err := w.createStubAccountsMgmt(fakePayloadUsername2, OrgId2); err != nil { + return err + } + return nil +} + +func (w *WireMock) createStubsForCreatingAMSSubscription(resStatus int) error { + + type reservedResource struct{} + + type clusterAuthorizationRequest struct { + AccountUsername string `json:"account_username"` + ProductCategory string `json:"product_category"` + ProductID string `json:"product_id"` + ClusterID string `json:"cluster_id"` + Managed 
bool `json:"managed"` + Resources []*reservedResource `json:"resources"` + Reserve bool `json:"reserve"` + DisplayName string `json:"display_name"` + } + + type clusterAuthorizationResponse struct { + Subscription subscription `json:"subscription"` + } + + caRequest := clusterAuthorizationRequest{ + AccountUsername: "${json-unit.any-string}", + ProductCategory: ocm.ProductCategoryAssistedInstall, + ProductID: ocm.ProductIdOCP, + ClusterID: "${json-unit.any-string}", + Managed: false, + Resources: []*reservedResource{}, + Reserve: true, + DisplayName: "${json-unit.any-string}", + } + + caResponse := clusterAuthorizationResponse{ + Subscription: subscription{ID: FakeSubscriptionID}, + } + + var reqBody []byte + reqBody, err := json.Marshal(caRequest) + if err != nil { + return err + } + + var resBody []byte + resBody, err = json.Marshal(caResponse) + if err != nil { + return err + } + + amsSubscriptionStub := w.createStubDefinition(clusterAuthzPath, "POST", string(reqBody), string(resBody), resStatus) + _, err = w.addStub(amsSubscriptionStub) + return err +} + +func (w *WireMock) createStubsForGettingAMSSubscription(resStatus int, status string) error { + + subResponse := subscription{ + ID: FakeSubscriptionID, + Status: status, + } + + var resBody []byte + resBody, err := json.Marshal(subResponse) + if err != nil { + return err + } + + amsSubscriptionStub := w.createStubDefinition(subscriptionPath, "GET", "", string(resBody), resStatus) + _, err = w.addStub(amsSubscriptionStub) + return err +} + +func (w *WireMock) createStubsForUpdatingAMSSubscription(resStatus int, updateType string) error { + + switch updateType { + + case subscriptionUpdateDisplayName: + + type subscriptionUpdateRequest struct { + DisplayName string `json:"display_name"` + } + + subRequest := subscriptionUpdateRequest{ + DisplayName: "${json-unit.any-string}", + } + + subResponse := subscription{ + ID: FakeSubscriptionID, + } + + var reqBody []byte + reqBody, err := json.Marshal(subRequest) + if 
err != nil { + return err + } + + var resBody []byte + resBody, err = json.Marshal(subResponse) + if err != nil { + return err + } + + amsSubscriptionStub := w.createStubDefinition(subscriptionPath, "PATCH", string(reqBody), string(resBody), resStatus) + _, err = w.addStub(amsSubscriptionStub) + return err + + case subscriptionUpdateConsoleUrl: + + type subscriptionUpdateRequest struct { + ConsoleUrl string `json:"console_url"` + } + + subRequest := subscriptionUpdateRequest{ + ConsoleUrl: "${json-unit.any-string}", + } + + subResponse := subscription{ + ID: FakeSubscriptionID, + } + + var reqBody []byte + reqBody, err := json.Marshal(subRequest) + if err != nil { + return err + } + + var resBody []byte + resBody, err = json.Marshal(subResponse) + if err != nil { + return err + } + + amsSubscriptionStub := w.createStubDefinition(subscriptionPath, "PATCH", string(reqBody), string(resBody), resStatus) + _, err = w.addStub(amsSubscriptionStub) + return err + + case subscriptionUpdateOpenshiftClusterID: + + type subscriptionUpdateRequest struct { + ExternalClusterID strfmt.UUID `json:"external_cluster_id"` + } + + subRequest := subscriptionUpdateRequest{ + ExternalClusterID: "${json-unit.any-string}", + } + + subResponse := subscription{ + ID: FakeSubscriptionID, + } + + var reqBody []byte + reqBody, err := json.Marshal(subRequest) + if err != nil { + return err + } + + var resBody []byte + resBody, err = json.Marshal(subResponse) + if err != nil { + return err + } + + amsSubscriptionStub := w.createStubDefinition(subscriptionPath, "PATCH", string(reqBody), string(resBody), resStatus) + _, err = w.addStub(amsSubscriptionStub) + return err + + case subscriptionUpdateStatusActive: + + type subscriptionUpdateRequest struct { + Status string `json:"status"` + } + + subRequest := subscriptionUpdateRequest{ + Status: ocm.SubscriptionStatusActive, + } + + subResponse := subscription{ + ID: FakeSubscriptionID, + } + + var reqBody []byte + reqBody, err := 
json.Marshal(subRequest) + if err != nil { + return err + } + + var resBody []byte + resBody, err = json.Marshal(subResponse) + if err != nil { + return err + } + + amsSubscriptionStub := w.createStubDefinition(subscriptionPath, "PATCH", string(reqBody), string(resBody), resStatus) + _, err = w.addStub(amsSubscriptionStub) + return err + + default: + + return errors.New("Invalid updateType arg") + } +} + +func (w *WireMock) createStubsForDeletingAMSSubscription(resStatus int) error { + + amsSubscriptionStub := w.createStubDefinition(subscriptionPath, "DELETE", "", "", resStatus) + _, err := w.addStub(amsSubscriptionStub) + return err +} + +func (w *WireMock) createStubToken(testToken string) (string, error) { + type TokenResponse struct { + AccessToken string `json:"access_token,omitempty"` + Error string `json:"error,omitempty"` + ErrorDescription string `json:"error_description,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + TokenType string `json:"token_type,omitempty"` + } + tokenResponse := TokenResponse{ + AccessToken: testToken, + RefreshToken: testToken, + TokenType: "bearer", + } + + var resBody []byte + resBody, err := json.Marshal(tokenResponse) + if err != nil { + return "", err + } + + tokenStub := &StubDefinition{ + Request: &RequestDefinition{ + URL: tokenPath, + Method: "POST", + }, + Response: &ResponseDefinition{ + Status: 200, + Body: string(resBody), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }, + } + + return w.addStub(tokenStub) +} + +func (w *WireMock) createStubBareMetalCapabilityReview(username string, result bool) (string, error) { + type CapabilityRequest struct { + Name string `json:"capability"` + Type string `json:"type"` + Username string `json:"account_username"` + } + + type CapabilityResponse struct { + Result string `json:"result"` + } + + capabilityRequest := CapabilityRequest{ + Name: ocm.BareMetalCapabilityName, + Type: ocm.AccountCapabilityType, + Username: username, + } + + 
capabilityResponse := CapabilityResponse{ + Result: strconv.FormatBool(result), + } + + return w.addCapabilityReviewStub(capabilityRequest, capabilityResponse) +} + +func (w *WireMock) createStubMultiarchCapabilityReview(username string, orgId string, result bool) (string, error) { + type CapabilityRequest struct { + Name string `json:"capability"` + Type string `json:"type"` + Username string `json:"account_username"` + Org string `json:"organization_id"` + } + + type CapabilityResponse struct { + Result string `json:"result"` + } + + capabilityRequest := CapabilityRequest{ + Name: ocm.MultiarchCapabilityName, + Type: ocm.OrganizationCapabilityType, + Username: username, + Org: orgId, + } + + capabilityResponse := CapabilityResponse{ + Result: strconv.FormatBool(result), + } + return w.addCapabilityReviewStub(capabilityRequest, capabilityResponse) +} + +func (w *WireMock) createStubIgnoreValidationsCapabilityReview(username string, orgId string, result bool) (string, error) { + type CapabilityRequest struct { + Name string `json:"capability"` + Type string `json:"type"` + Username string `json:"account_username"` + Org string `json:"organization_id"` + } + + type CapabilityResponse struct { + Result string `json:"result"` + } + + capabilityRequest := CapabilityRequest{ + Name: ocm.IgnoreValidationsCapabilityName, + Type: ocm.OrganizationCapabilityType, + Username: username, + Org: orgId, + } + + capabilityResponse := CapabilityResponse{ + Result: strconv.FormatBool(result), + } + return w.addCapabilityReviewStub(capabilityRequest, capabilityResponse) +} + +func (w *WireMock) addCapabilityReviewStub(capabilityRequest interface{}, capabilityResponse interface{}) (string, error) { + var reqBody []byte + reqBody, err := json.Marshal(capabilityRequest) + if err != nil { + return "", err + } + + var resBody []byte + resBody, err = json.Marshal(capabilityResponse) + if err != nil { + return "", err + } + + capabilityReviewStub := 
w.createStubDefinition(capabilityReviewPath, "POST", string(reqBody), string(resBody), 200) + return w.addStub(capabilityReviewStub) +} + +func (w *WireMock) createStubAccountsMgmt(username string, orgId string) (string, error) { + type Organization struct { + ID string `json:"id"` + } + + type Account struct { + Username string `json:"username"` + Email string `json:"email"` + Organization Organization `json:"organization"` + } + + type AccountsListResponse struct { + Items []*Account `json:"items"` + } + + account := Account{ + Email: username, + Username: username, + Organization: Organization{ + ID: orgId, + }, + } + + res := AccountsListResponse{ + Items: []*Account{ + &account, + }, + } + + var resBody []byte + resBody, err := json.Marshal(res) + if err != nil { + return "", err + } + + accountsMgmtSearchPath := strings.Join([]string{accountsMgmtSearchPrefix, url.QueryEscape(fmt.Sprintf("='%s'", username))}, "") + accountsMgmtSearchStub := w.createStubDefinition(accountsMgmtSearchPath, + "GET", "", string(resBody), 200) + + return w.addStub(accountsMgmtSearchStub) +} + +func (w *WireMock) createStubClusterEditorRequest(username string, subscriptionId string, action string, allowed bool) (string, error) { + type AccessRequest struct { + ResourceType string `json:"resource_type"` + Action string `json:"action"` + Username string `json:"account_username"` + SubscriptionId string `json:"subscription_id"` + } + + type AccessResponse struct { + Allowed bool `json:"allowed"` + } + + accessRequest := AccessRequest{ + Username: username, + Action: action, + ResourceType: "Subscription", + SubscriptionId: subscriptionId, + } + + accessResponse := AccessResponse{ + Allowed: allowed, + } + + var reqBody []byte + reqBody, err := json.Marshal(accessRequest) + if err != nil { + return "", err + } + + var resBody []byte + resBody, err = json.Marshal(accessResponse) + if err != nil { + return "", err + } + + accessReviewStub := w.createStubDefinition(accessReviewPath, "POST", 
string(reqBody), string(resBody), 200) + return w.addStub(accessReviewStub) +} + +func (w *WireMock) createStubAccessReview(username string, allowed bool) (string, error) { + type CapabilityRequest struct { + ResourceType string `json:"resource_type"` + Action string `json:"action"` + Username string `json:"account_username"` + } + + type CapabilityResponse struct { + Allowed bool `json:"allowed"` + } + + capabilityRequest := CapabilityRequest{ + Username: username, + Action: ocm.AMSActionCreate, + ResourceType: ocm.BareMetalClusterResource, + } + + capabilityResponse := CapabilityResponse{ + Allowed: allowed, + } + + var reqBody []byte + reqBody, err := json.Marshal(capabilityRequest) + if err != nil { + return "", err + } + + var resBody []byte + resBody, err = json.Marshal(capabilityResponse) + if err != nil { + return "", err + } + + capabilityReviewStub := w.createStubDefinition(accessReviewPath, "POST", string(reqBody), string(resBody), 200) + return w.addStub(capabilityReviewStub) +} + +func (w *WireMock) createStubTokenAuth(token, username string) (string, error) { + type TokenAuthorizationRequest struct { + AuthorizationToken string `json:"authorization_token"` + } + + type Account struct { + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Username string `json:"username"` + Email string `json:"email"` + } + + type TokenAuthorizationResponse struct { + Account Account `json:"account"` + } + + tokenAuthorizationRequest := TokenAuthorizationRequest{ + AuthorizationToken: token, + } + + tokenAuthorizationResponse := TokenAuthorizationResponse{ + Account: Account{ + FirstName: "UserFirstName", + LastName: "UserLastName", + Username: username, + Email: "user@myorg.com", + }, + } + + var reqBody []byte + reqBody, err := json.Marshal(tokenAuthorizationRequest) + if err != nil { + return "", err + } + + var resBody []byte + resBody, err = json.Marshal(tokenAuthorizationResponse) + if err != nil { + return "", err + } + + tokenAuthStub 
:= w.createStubDefinition(pullAuthPath, "POST", string(reqBody), string(resBody), 200) + return w.addStub(tokenAuthStub) +} + +func (w *WireMock) createWrongStubTokenAuth(token string) (string, error) { + type TokenAuthorizationRequest struct { + AuthorizationToken string `json:"authorization_token"` + } + + tokenAuthorizationRequest := TokenAuthorizationRequest{ + AuthorizationToken: token, + } + + type ErrorResponse struct { + Code string `json:"code"` + Href string `json:"href"` + ID string `json:"id"` + Kind string `json:"kind"` + OperationID string `json:"operation_id"` + Reason string `json:"reason"` + } + + errorResponse := ErrorResponse{ + Code: "ACCT-MGMT-7", + Href: "/api/accounts_mgmt/v1/errors/7", + ID: "7", + Kind: "Error", + OperationID: "op_id", + Reason: "Unable to find credential with specified authorization token", + } + + var reqBody []byte + reqBody, err := json.Marshal(tokenAuthorizationRequest) + if err != nil { + return "", err + } + + var resBody []byte + resBody, err = json.Marshal(errorResponse) + if err != nil { + return "", err + } + + tokenAuthStub := w.createStubDefinition(pullAuthPath, "POST", string(reqBody), string(resBody), 404) + return w.addStub(tokenAuthStub) +} + +func (w *WireMock) createOpenshiftUpdateServiceStubs() (string, error) { + // OCP releases API needs amd64, arm64 instead of x86_64, aarch64 respectively + cpuArchMapToAPIArch := map[string]string{ + common.X86CPUArchitecture: common.AMD64CPUArchitecture, + common.AARCH64CPUArchitecture: common.ARM64CPUArchitecture, + } + + for _, releaseSource := range w.ReleaseSources { + openshiftVersion := *releaseSource.OpenshiftVersion + for _, upgradeChannel := range releaseSource.UpgradeChannels { + cpuArchitecture := *upgradeChannel.CPUArchitecture + for _, channel := range upgradeChannel.Channels { + apiCpuArchitecture, shouldSwitch := cpuArchMapToAPIArch[cpuArchitecture] + if shouldSwitch { + cpuArchitecture = apiCpuArchitecture + } + + u := url.URL{ + Path: 
releasesources.OpenshiftUpdateServiceAPIURLPath, + } + + q := url.Values{} + q.Add(releasesources.OpenshiftUpdateServiceAPIURLQueryChannel, fmt.Sprintf("%s-%s", channel, openshiftVersion)) + q.Add(releasesources.OpenshiftUpdateServiceAPIURLQueryArch, cpuArchitecture) + u.RawQuery = q.Encode() + endpoint := "/" + u.String() + + responseStruct := releasesources.ReleaseGraph{ + Nodes: []releasesources.Node{ + { + Version: fmt.Sprintf("%s.0", openshiftVersion), + }, + }, + } + + var resBody []byte + resBody, err := json.Marshal(responseStruct) + if err != nil { + return "", err + } + + newStub := w.createStubDefinition(endpoint, "GET", "", string(resBody), 200) + _, err = w.addStub(newStub) + if err != nil { + return "", err + } + } + } + } + + return "", nil +} + +func (w *WireMock) createStubDefinition(url, method, reqBody, resBody string, resStatus int) *StubDefinition { + sd := &StubDefinition{ + Request: &RequestDefinition{ + URL: url, + Method: method, + }, + Response: &ResponseDefinition{ + Status: resStatus, + Body: resBody, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }, + } + if reqBody != "" { + sd.Request.BodyPatterns = []map[string]string{ + { + "equalToJson": reqBody, + "ignoreExtraElements": "true", + }, + } + } + return sd +} + +func (w *WireMock) addStub(stub *StubDefinition) (string, error) { + requestBody, err := json.Marshal(stub) + if err != nil { + return "", err + } + var b bytes.Buffer + b.Write(requestBody) + + resp, err := http.Post("http://"+w.OCMHost+wiremockMappingsPath, "application/json", &b) + if err != nil { + return "", err + } + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + ret := Mapping{} + err = json.Unmarshal(responseBody, &ret) + if err != nil { + return "", err + } + return ret.ID, nil +} + +func (w *WireMock) DeleteAllWiremockStubs() error { + req, err := http.NewRequest("DELETE", "http://"+w.OCMHost+wiremockMappingsPath, nil) + if err != nil { + return err + } 
+ client := &http.Client{} + _, err = client.Do(req) + return err +} + +func (w *WireMock) DeleteStub(stubID string) error { + req, err := http.NewRequest("DELETE", "http://"+w.OCMHost+wiremockMappingsPath+"/"+stubID, nil) + if err != nil { + return err + } + client := &http.Client{} + _, err = client.Do(req) + return err +} diff --git a/subsystem/versions_test.go b/subsystem/versions_test.go index e65219f4bb3..4a2810e0252 100644 --- a/subsystem/versions_test.go +++ b/subsystem/versions_test.go @@ -8,11 +8,12 @@ import ( . "github.com/onsi/gomega" "github.com/openshift/assisted-service/client/versions" "github.com/openshift/assisted-service/models" + "github.com/openshift/assisted-service/subsystem/utils_test" ) var _ = Describe("[minimal-set]test versions", func() { It("get versions list", func() { - reply, err := userBMClient.Versions.V2ListComponentVersions(context.Background(), &versions.V2ListComponentVersionsParams{}) + reply, err := utils_test.TestContext.UserBMClient.Versions.V2ListComponentVersions(context.Background(), &versions.V2ListComponentVersionsParams{}) Expect(err).ShouldNot(HaveOccurred()) // service, agent, installer, controller @@ -20,7 +21,7 @@ var _ = Describe("[minimal-set]test versions", func() { }) It("get openshift versions list", func() { - reply, err := userBMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), &versions.V2ListSupportedOpenshiftVersionsParams{}) + reply, err := utils_test.TestContext.UserBMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), &versions.V2ListSupportedOpenshiftVersionsParams{}) Expect(err).ShouldNot(HaveOccurred()) Expect(len(reply.GetPayload())).To(BeNumerically(">=", 1)) }) @@ -32,7 +33,7 @@ var _ = Describe("[minimal-set]test versions", func() { } }) It("Doesn't have multiarch capability", func() { - reply, err := userBMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), &versions.V2ListSupportedOpenshiftVersionsParams{}) + reply, err := 
utils_test.TestContext.UserBMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), &versions.V2ListSupportedOpenshiftVersionsParams{}) Expect(err).ShouldNot(HaveOccurred()) Expect(hasMultiarch(reply.GetPayload())).Should(BeFalse()) }) @@ -41,7 +42,7 @@ var _ = Describe("[minimal-set]test versions", func() { // "-multi" suffix when presented via SupportedOpenshiftVersions API. // As soon as we collapse single- and multiarch releases, the contract // defined by "func hasMultiarch()" will not be valid anymore. - reply, err := user2BMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), &versions.V2ListSupportedOpenshiftVersionsParams{}) + reply, err := utils_test.TestContext.User2BMClient.Versions.V2ListSupportedOpenshiftVersions(context.Background(), &versions.V2ListSupportedOpenshiftVersionsParams{}) Expect(err).ShouldNot(HaveOccurred()) Expect(hasMultiarch(reply.GetPayload())).Should(BeTrue()) })