diff --git a/test/e2e/autoscaler.go b/test/e2e/autoscaler.go
index 7269edf8367a..0a44de5b7924 100644
--- a/test/e2e/autoscaler.go
+++ b/test/e2e/autoscaler.go
@@ -367,6 +367,20 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput)
 			})
 		}
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/cluster_deletion.go b/test/e2e/cluster_deletion.go
index 1a1abcabc621..f59ee9eaba96 100644
--- a/test/e2e/cluster_deletion.go
+++ b/test/e2e/cluster_deletion.go
@@ -198,6 +198,20 @@ func ClusterDeletionSpec(ctx context.Context, inputGetter func() ClusterDeletion
 			},
 		}, clusterResources)
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		// Get all objects per deletion phase and the list of blocking objects.
 		var objectsPerPhase [][]client.Object
 		objectsPerPhase, blockingObjects = getDeletionPhaseObjects(ctx, input.BootstrapClusterProxy, clusterResources.Cluster, input.ClusterDeletionPhases)
diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go
index 6af3aacb122b..4caae51abc95 100644
--- a/test/e2e/cluster_upgrade.go
+++ b/test/e2e/cluster_upgrade.go
@@ -259,6 +259,20 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust
 			WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
 		})
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		if !input.SkipConformanceTests {
 			By("Running conformance tests")
 			// Start running the conformance test suite.
diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go
index ad18ee28f91a..ba4a750c0057 100644
--- a/test/e2e/cluster_upgrade_runtimesdk.go
+++ b/test/e2e/cluster_upgrade_runtimesdk.go
@@ -347,6 +347,20 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
 			"AfterClusterUpgrade": "Success",
 		})).To(Succeed(), "Lifecycle hook calls were not as expected")
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go
index f5d53be95e12..6afd18152037 100644
--- a/test/e2e/clusterclass_changes.go
+++ b/test/e2e/clusterclass_changes.go
@@ -272,6 +272,20 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas
 			WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
 		})
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("Deleting a MachineDeploymentTopology in the Cluster Topology and wait for associated MachineDeployment to be deleted")
 		deleteMachineDeploymentTopologyAndWait(ctx, deleteMachineDeploymentTopologyAndWaitInput{
 			ClusterProxy: input.BootstrapClusterProxy,
diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go
index 6ce0fa7e1812..414e31ff3020 100644
--- a/test/e2e/clusterclass_rollout.go
+++ b/test/e2e/clusterclass_rollout.go
@@ -302,6 +302,20 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 		}, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...).Should(Succeed())
 		assertClusterObjects(ctx, input.BootstrapClusterProxy, clusterResources.Cluster, clusterResources.ClusterClass, input.FilterMetadataBeforeValidation)
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
index be69b1e0ceee..cb31966d1fe5 100644
--- a/test/e2e/clusterctl_upgrade.go
+++ b/test/e2e/clusterctl_upgrade.go
@@ -722,9 +722,19 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 				upgrade.PostUpgrade(managementClusterProxy, workloadCluster.Namespace, workloadCluster.Name)
 			}
 
-			Byf("[%d] Verify v1beta2 Available and Ready conditions (if exist) to be true for Cluster and Machines", i)
-			verifyV1Beta2ConditionsTrueV1Beta1(ctx, managementClusterProxy.GetClient(), workloadCluster.Name, workloadCluster.Namespace,
-				[]string{clusterv1.AvailableCondition, clusterv1.ReadyCondition})
+			Byf("[%d] Verify Cluster Available condition is true", i)
+			framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+				Getter:    managementClusterProxy.GetClient(),
+				Name:      workloadCluster.Name,
+				Namespace: workloadCluster.Namespace,
+			})
+
+			Byf("[%d] Verify Machines Ready condition is true", i)
+			framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+				Lister:    managementClusterProxy.GetClient(),
+				Name:      workloadCluster.Name,
+				Namespace: workloadCluster.Namespace,
+			})
 
 			// Note: It is a known issue on Kubernetes < v1.29 that SSA sometimes fail:
 			// https://github.com/kubernetes/kubernetes/issues/117356
@@ -801,53 +811,6 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 	})
 }
 
-// verifyV1Beta2ConditionsTrueV1Beta1 checks the Cluster and Machines of a Cluster that
-// the given v1beta2 condition types are set to true without a message, if they exist.
-func verifyV1Beta2ConditionsTrueV1Beta1(ctx context.Context, c client.Client, clusterName, clusterNamespace string, v1beta2conditionTypes []string) {
-	cluster := &clusterv1beta1.Cluster{}
-	key := client.ObjectKey{
-		Namespace: clusterNamespace,
-		Name:      clusterName,
-	}
-	Eventually(func() error {
-		return c.Get(ctx, key, cluster)
-	}, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to get Cluster object %s", klog.KRef(clusterNamespace, clusterName))
-
-	if cluster.Status.V1Beta2 != nil && len(cluster.Status.V1Beta2.Conditions) > 0 {
-		for _, conditionType := range v1beta2conditionTypes {
-			for _, condition := range cluster.Status.V1Beta2.Conditions {
-				if condition.Type != conditionType {
-					continue
-				}
-				Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Cluster should be set to true", conditionType)
-				Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Cluster should have an empty message", conditionType)
-			}
-		}
-	}
-
-	machineList := &clusterv1beta1.MachineList{}
-	Eventually(func() error {
-		return c.List(ctx, machineList, client.InNamespace(clusterNamespace),
-			client.MatchingLabels{
-				clusterv1.ClusterNameLabel: clusterName,
-			})
-	}, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to list Machines for Cluster %s", klog.KObj(cluster))
-	for _, machine := range machineList.Items {
-		if machine.Status.V1Beta2 == nil || len(machine.Status.V1Beta2.Conditions) == 0 {
-			continue
-		}
-		for _, conditionType := range v1beta2conditionTypes {
-			for _, condition := range machine.Status.V1Beta2.Conditions {
-				if condition.Type != conditionType {
-					continue
-				}
-				Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Machine %q should be set to true", conditionType, machine.Name)
-				Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Machine %q should have an empty message", conditionType, machine.Name)
-			}
-		}
-	}
-}
-
 func setupClusterctl(ctx context.Context, clusterctlBinaryURL, clusterctlConfigPath string) (string, string) {
 	clusterctlBinaryPath := downloadToTmpFile(ctx, clusterctlBinaryURL)
 
diff --git a/test/e2e/k8s_conformance.go b/test/e2e/k8s_conformance.go
index 907d121e5ba9..c85f679be01b 100644
--- a/test/e2e/k8s_conformance.go
+++ b/test/e2e/k8s_conformance.go
@@ -153,6 +153,20 @@ func K8SConformanceSpec(ctx context.Context, inputGetter func() K8SConformanceSp
 		)
 		Expect(err).ToNot(HaveOccurred(), "Failed to run Kubernetes conformance")
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go
index ce5c8d61c142..d5170c5c4b3d 100644
--- a/test/e2e/kcp_adoption.go
+++ b/test/e2e/kcp_adoption.go
@@ -247,6 +247,20 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu
 		}
 		Expect(secrets.Items).To(HaveLen(4 /* pki */ + 1 /* kubeconfig */ + int(*replicas)))
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      cluster.Name,
+			Namespace: cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      cluster.Name,
+			Namespace: cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/kcp_remediations.go b/test/e2e/kcp_remediations.go
index b8d62d3360db..bfe56dc04640 100644
--- a/test/e2e/kcp_remediations.go
+++ b/test/e2e/kcp_remediations.go
@@ -413,6 +413,20 @@ func KCPRemediationSpec(ctx context.Context, inputGetter func() KCPRemediationSp
 
 		By("CP BACK TO FULL OPERATIONAL STATE!")
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/machine_pool.go b/test/e2e/machine_pool.go
index b4f372452744..cfeff8a4b893 100644
--- a/test/e2e/machine_pool.go
+++ b/test/e2e/machine_pool.go
@@ -139,6 +139,20 @@ func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) {
 			WaitForMachinePoolToScale: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
 		})
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/md_remediations.go b/test/e2e/md_remediations.go
index c78674da925e..e6f663e19429 100644
--- a/test/e2e/md_remediations.go
+++ b/test/e2e/md_remediations.go
@@ -120,6 +120,20 @@ func MachineDeploymentRemediationSpec(ctx context.Context, inputGetter func() Ma
 			WaitForMachineRemediation: input.E2EConfig.GetIntervals(specName, "wait-machine-remediation"),
 		})
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/md_rollout.go b/test/e2e/md_rollout.go
index bc6212c7924a..a08520422fb5 100644
--- a/test/e2e/md_rollout.go
+++ b/test/e2e/md_rollout.go
@@ -120,6 +120,21 @@ func MachineDeploymentRolloutSpec(ctx context.Context, inputGetter func() Machin
 			WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
 			MachineDeployments:          clusterResources.MachineDeployments,
 		})
+
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/md_scale.go b/test/e2e/md_scale.go
index d8b6044de32d..bf1ccde5e76b 100644
--- a/test/e2e/md_scale.go
+++ b/test/e2e/md_scale.go
@@ -125,6 +125,20 @@ func MachineDeploymentScaleSpec(ctx context.Context, inputGetter func() MachineD
 			WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
 		})
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("Deleting the MachineDeployment with foreground deletion")
 		foreground := metav1.DeletePropagationForeground
 		framework.DeleteAndWaitMachineDeployment(ctx, framework.DeleteAndWaitMachineDeploymentInput{
diff --git a/test/e2e/node_drain.go b/test/e2e/node_drain.go
index be86df516160..856ee1c504b9 100644
--- a/test/e2e/node_drain.go
+++ b/test/e2e/node_drain.go
@@ -614,6 +614,20 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
 			}
 		}, nodeDrainTimeoutKCPInterval...).Should(Succeed())
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go
index 8b65d1b77774..06c9e35af74e 100644
--- a/test/e2e/quick_start.go
+++ b/test/e2e/quick_start.go
@@ -180,6 +180,20 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
 			},
 		}, clusterResources)
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go
index 04e7e6732bd0..6341d1a51bea 100644
--- a/test/e2e/self_hosted.go
+++ b/test/e2e/self_hosted.go
@@ -429,6 +429,20 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
 			WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
 		})
 
+		Byf("Verify Cluster Available condition is true")
+		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
+			Getter:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
+		Byf("Verify Machines Ready condition is true")
+		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
+			Lister:    input.BootstrapClusterProxy.GetClient(),
+			Name:      clusterResources.Cluster.Name,
+			Namespace: clusterResources.Cluster.Namespace,
+		})
+
 		By("PASSED!")
 	})
 
diff --git a/test/framework/cluster_helpers.go b/test/framework/cluster_helpers.go
index f775afca3510..5f870482fc33 100644
--- a/test/framework/cluster_helpers.go
+++ b/test/framework/cluster_helpers.go
@@ -22,10 +22,12 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -430,3 +432,64 @@ func DescribeAllCluster(ctx context.Context, input DescribeAllClusterInput) {
 		})
 	}
 }
+
+type VerifyClusterAvailableInput struct {
+	Getter    Getter
+	Name      string
+	Namespace string
+}
+
+// VerifyClusterAvailable verifies that the Cluster's Available condition is set to true.
+func VerifyClusterAvailable(ctx context.Context, input VerifyClusterAvailableInput) {
+	cluster := &clusterv1.Cluster{}
+	key := client.ObjectKey{
+		Name:      input.Name,
+		Namespace: input.Namespace,
+	}
+
+	// Wait for the cluster Available condition to stabilize.
+	Eventually(func(g Gomega) {
+		g.Expect(input.Getter.Get(ctx, key, cluster)).To(Succeed())
+		for _, condition := range cluster.Status.Conditions {
+			if condition.Type == clusterv1.AvailableCondition {
+				g.Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The Available condition on the Cluster should be set to true")
+				g.Expect(condition.Message).To(BeEmpty(), "The Available condition on the Cluster should have an empty message")
+				return
+			}
+		}
+	}, 5*time.Minute, 10*time.Second).Should(Succeed(), "Failed to verify Cluster Available condition for %s", klog.KRef(input.Namespace, input.Name))
+}
+
+type VerifyMachinesReadyInput struct {
+	Lister    Lister
+	Name      string
+	Namespace string
+}
+
+// VerifyMachinesReady verifies that all Machines' Ready condition is set to true.
+func VerifyMachinesReady(ctx context.Context, input VerifyMachinesReadyInput) {
+	machineList := &clusterv1.MachineList{}
+
+	// Wait for all machines to have Ready condition set to true.
+	Eventually(func(g Gomega) {
+		g.Expect(input.Lister.List(ctx, machineList, client.InNamespace(input.Namespace),
+			client.MatchingLabels{
+				clusterv1.ClusterNameLabel: input.Name,
+			})).To(Succeed())
+
+		g.Expect(machineList.Items).ToNot(BeEmpty(), "No machines found for cluster %s", input.Name)
+
+		for _, machine := range machineList.Items {
+			readyConditionFound := false
+			for _, condition := range machine.Status.Conditions {
+				if condition.Type == clusterv1.ReadyCondition {
+					readyConditionFound = true
+					g.Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The Ready condition on Machine %q should be set to true", machine.Name)
+					g.Expect(condition.Message).To(BeEmpty(), "The Ready condition on Machine %q should have an empty message", machine.Name)
+					break
+				}
+			}
+			g.Expect(readyConditionFound).To(BeTrue(), "Machine %q should have a Ready condition", machine.Name)
+		}
+	}, 5*time.Minute, 10*time.Second).Should(Succeed(), "Failed to verify Machines Ready condition for Cluster %s", klog.KRef(input.Namespace, input.Name))
+}
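Note: the two helpers added to test/framework above are exported, so provider e2e suites that import sigs.k8s.io/cluster-api/test/framework can reuse them the same way the specs in this patch do. A minimal sketch follows; the proxy variable, cluster name, and namespace parameters are placeholders (not part of this patch), and the wrapper must be called from inside a Ginkgo node because the helpers use the global Gomega Eventually.

package e2e

import (
	"context"

	"sigs.k8s.io/cluster-api/test/framework"
)

// verifyClusterAndMachines is an illustrative wrapper around the two new framework helpers;
// call it from within an It(...) block after the workload cluster has been provisioned.
func verifyClusterAndMachines(ctx context.Context, proxy framework.ClusterProxy, clusterName, namespace string) {
	// Poll until the Cluster reports the Available condition as true with an empty message.
	framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
		Getter:    proxy.GetClient(),
		Name:      clusterName,
		Namespace: namespace,
	})

	// Poll until every Machine belonging to the Cluster reports the Ready condition as true.
	framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
		Lister:    proxy.GetClient(),
		Name:      clusterName,
		Namespace: namespace,
	})
}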