diff --git a/.golangci.yml b/.golangci.yml index 0a3577672702..2227afb178ad 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -47,9 +47,6 @@ linters: - usestdlibvars # using variables/constants from the standard library - usetesting # report function to be replace by testing - whitespace # unnecessary newlines - disable: - # TODO: It will be dropped when the Go version migration is done. - - usetesting settings: ginkgolinter: forbid-focus-container: true diff --git a/cmd/clusterctl/client/alpha/rollout_pauser_test.go b/cmd/clusterctl/client/alpha/rollout_pauser_test.go index 8aa7ce84f795..b22a36b51e93 100644 --- a/cmd/clusterctl/client/alpha/rollout_pauser_test.go +++ b/cmd/clusterctl/client/alpha/rollout_pauser_test.go @@ -17,7 +17,6 @@ limitations under the License. package alpha import ( - "context" "testing" . "github.com/onsi/gomega" @@ -146,25 +145,25 @@ func Test_ObjectPauser(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectPauser(context.Background(), proxy, tt.fields.ref) + err := r.ObjectPauser(t.Context(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) for _, obj := range tt.fields.objs { - cl, err := proxy.NewClient(context.Background()) + cl, err := proxy.NewClient(t.Context()) g.Expect(err).ToNot(HaveOccurred()) key := client.ObjectKeyFromObject(obj) switch obj.(type) { case *clusterv1.MachineDeployment: md := &clusterv1.MachineDeployment{} - err = cl.Get(context.TODO(), key, md) + err = cl.Get(t.Context(), key, md) g.Expect(err).ToNot(HaveOccurred()) g.Expect(md.Spec.Paused).To(Equal(tt.wantPaused)) case *controlplanev1.KubeadmControlPlane: kcp := &controlplanev1.KubeadmControlPlane{} - err = cl.Get(context.TODO(), key, kcp) + err = cl.Get(t.Context(), key, kcp) g.Expect(err).ToNot(HaveOccurred()) g.Expect(annotations.HasPaused(kcp.GetObjectMeta())).To(Equal(tt.wantPaused)) } diff --git a/cmd/clusterctl/client/alpha/rollout_restarter_test.go b/cmd/clusterctl/client/alpha/rollout_restarter_test.go index dea33c953d31..860ed063ebb9 100644 --- a/cmd/clusterctl/client/alpha/rollout_restarter_test.go +++ b/cmd/clusterctl/client/alpha/rollout_restarter_test.go @@ -17,7 +17,6 @@ limitations under the License. package alpha import ( - "context" "testing" "time" @@ -204,20 +203,20 @@ func Test_ObjectRestarter(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) 
- err := r.ObjectRestarter(context.Background(), proxy, tt.fields.ref) + err := r.ObjectRestarter(t.Context(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) for _, obj := range tt.fields.objs { - cl, err := proxy.NewClient(context.Background()) + cl, err := proxy.NewClient(t.Context()) g.Expect(err).ToNot(HaveOccurred()) key := client.ObjectKeyFromObject(obj) switch obj.(type) { case *clusterv1.MachineDeployment: md := &clusterv1.MachineDeployment{} - err = cl.Get(context.TODO(), key, md) + err = cl.Get(t.Context(), key, md) g.Expect(err).ToNot(HaveOccurred()) if tt.wantRollout { g.Expect(md.Spec.RolloutAfter).NotTo(BeNil()) @@ -226,7 +225,7 @@ func Test_ObjectRestarter(t *testing.T) { } case *controlplanev1.KubeadmControlPlane: kcp := &controlplanev1.KubeadmControlPlane{} - err = cl.Get(context.TODO(), key, kcp) + err = cl.Get(t.Context(), key, kcp) g.Expect(err).ToNot(HaveOccurred()) if tt.wantRollout { g.Expect(kcp.Spec.RolloutAfter).NotTo(BeNil()) diff --git a/cmd/clusterctl/client/alpha/rollout_resumer_test.go b/cmd/clusterctl/client/alpha/rollout_resumer_test.go index 7f937b997aeb..2d56ef970a34 100644 --- a/cmd/clusterctl/client/alpha/rollout_resumer_test.go +++ b/cmd/clusterctl/client/alpha/rollout_resumer_test.go @@ -17,7 +17,6 @@ limitations under the License. package alpha import ( - "context" "testing" . "github.com/onsi/gomega" @@ -149,25 +148,25 @@ func Test_ObjectResumer(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectResumer(context.Background(), proxy, tt.fields.ref) + err := r.ObjectResumer(t.Context(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) for _, obj := range tt.fields.objs { - cl, err := proxy.NewClient(context.Background()) + cl, err := proxy.NewClient(t.Context()) g.Expect(err).ToNot(HaveOccurred()) key := client.ObjectKeyFromObject(obj) switch obj.(type) { case *clusterv1.MachineDeployment: md := &clusterv1.MachineDeployment{} - err = cl.Get(context.TODO(), key, md) + err = cl.Get(t.Context(), key, md) g.Expect(err).ToNot(HaveOccurred()) g.Expect(md.Spec.Paused).To(Equal(tt.wantPaused)) case *controlplanev1.KubeadmControlPlane: kcp := &controlplanev1.KubeadmControlPlane{} - err = cl.Get(context.TODO(), key, kcp) + err = cl.Get(t.Context(), key, kcp) g.Expect(err).ToNot(HaveOccurred()) g.Expect(annotations.HasPaused(kcp.GetObjectMeta())).To(Equal(tt.wantPaused)) } diff --git a/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go new file mode 100644 index 000000000000..2aa67ad2e6e4 --- /dev/null +++ b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go @@ -0,0 +1,260 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alpha + +import ( + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" +) + +func Test_ObjectRollbacker(t *testing.T) { + labels := map[string]string{ + clusterv1.ClusterNameLabel: "test", + clusterv1.MachineDeploymentNameLabel: "test-md-0", + } + currentVersion := "v1.19.3" + rollbackVersion := "v1.19.1" + deployment := &clusterv1.MachineDeployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineDeployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-md-0", + Namespace: "default", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test", + }, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: "test", + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.ClusterNameLabel: "test", + }, + }, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: labels, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: "test", + Version: ¤tVersion, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: clusterv1.GroupVersionInfrastructure.String(), + Kind: "InfrastructureMachineTemplate", + Name: "md-template", + }, + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To("data-secret-name"), + }, + }, + }, + }, + } + type fields struct { + objs []client.Object + ref corev1.ObjectReference + toRevision int64 + } + tests := []struct { + name string + fields fields + wantErr bool + wantVersion string + wantInfraTemplate string + wantBootsrapSecretName string + }{ + { + name: "machinedeployment should rollback to revision=1", + fields: fields{ + objs: []client.Object{ + deployment, + &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ms-rev-2", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, clusterv1.GroupVersion.WithKind("MachineDeployment")), + }, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test", + }, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + }, + &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "ms-rev-1", + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, clusterv1.GroupVersion.WithKind("MachineDeployment")), + }, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test", + }, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "999", + }, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: "test", + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.ClusterNameLabel: "test", + }, + }, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: labels, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: "test", + Version: &rollbackVersion, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: clusterv1.GroupVersionInfrastructure.String(), + Kind: "InfrastructureMachineTemplate", + Name: "md-template-rollback", + }, + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To("data-secret-name-rollback"), + }, + }, + }, + }, + }, + }, + ref: corev1.ObjectReference{ + Kind: MachineDeployment, + Name: "test-md-0", + Namespace: "default", + }, + 
toRevision: int64(999), + }, + wantErr: false, + wantVersion: rollbackVersion, + wantInfraTemplate: "md-template-rollback", + wantBootsrapSecretName: "data-secret-name-rollback", + }, + { + name: "machinedeployment should not rollback because there is no previous revision", + fields: fields{ + objs: []client.Object{ + deployment, + &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ms-rev-2", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, clusterv1.GroupVersion.WithKind("MachineDeployment")), + }, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test", + }, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + }, + }, + ref: corev1.ObjectReference{ + Kind: MachineDeployment, + Name: "test-md-0", + Namespace: "default", + }, + toRevision: int64(0), + }, + wantErr: true, + }, + { + name: "machinedeployment should not rollback because the specified version does not exist", + fields: fields{ + objs: []client.Object{ + deployment, + &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ms-rev-2", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, clusterv1.GroupVersion.WithKind("MachineDeployment")), + }, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test", + }, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + }, + }, + ref: corev1.ObjectReference{ + Kind: MachineDeployment, + Name: "test-md-0", + Namespace: "default", + }, + toRevision: int64(999), + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + r := newRolloutClient() + proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) + err := r.ObjectRollbacker(t.Context(), proxy, tt.fields.ref, tt.fields.toRevision) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + cl, err := proxy.NewClient(t.Context()) + g.Expect(err).ToNot(HaveOccurred()) + key := client.ObjectKeyFromObject(deployment) + md := &clusterv1.MachineDeployment{} + err = cl.Get(t.Context(), key, md) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(*md.Spec.Template.Spec.Version).To(Equal(tt.wantVersion)) + g.Expect(md.Spec.Template.Spec.InfrastructureRef.Name).To(Equal(tt.wantInfraTemplate)) + g.Expect(*md.Spec.Template.Spec.Bootstrap.DataSecretName).To(Equal(tt.wantBootsrapSecretName)) + }) + } +} diff --git a/cmd/clusterctl/client/client_test.go b/cmd/clusterctl/client/client_test.go index 671bb9663c89..126aa65e33f2 100644 --- a/cmd/clusterctl/client/client_test.go +++ b/cmd/clusterctl/client/client_test.go @@ -41,11 +41,11 @@ import ( ) // TestNewFakeClient is a fake test to document fakeClient usage. -func TestNewFakeClient(_ *testing.T) { +func TestNewFakeClient(t *testing.T) { // create a fake config with a provider named P1 and a variable named var repository1Config := config.NewProvider("p1", "url", clusterctlv1.CoreProviderType) - ctx := context.Background() + ctx := t.Context() config1 := newFakeConfig(ctx). WithVar("var", "value"). @@ -62,7 +62,7 @@ func TestNewFakeClient(_ *testing.T) { WithObjs() // create a new fakeClient that allows to execute tests on the fake config, the fake repositories and the fake cluster. - newFakeClient(context.Background(), config1). + newFakeClient(t.Context(), config1). 
WithRepository(repository1). WithCluster(cluster1) } diff --git a/cmd/clusterctl/client/cluster/cert_manager_test.go b/cmd/clusterctl/client/cluster/cert_manager_test.go index cab681d02acb..773a1b436c98 100644 --- a/cmd/clusterctl/client/cluster/cert_manager_test.go +++ b/cmd/clusterctl/client/cluster/cert_manager_test.go @@ -61,7 +61,7 @@ var certManagerNamespaceYaml = []byte("apiVersion: v1\n" + func Test_getManifestObjs(t *testing.T) { g := NewWithT(t) - defaultConfigClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", ""))) + defaultConfigClient, err := config.New(t.Context(), "", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", ""))) g.Expect(err).ToNot(HaveOccurred()) type fields struct { @@ -109,7 +109,7 @@ func Test_getManifestObjs(t *testing.T) { name: "successfully gets the cert-manager components for a custom release", fields: fields{ configClient: func() config.Client { - configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", "").WithCertManager("", "v1.0.0", ""))) + configClient, err := config.New(t.Context(), "", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", "").WithCertManager("", "v1.0.0", ""))) g.Expect(err).ToNot(HaveOccurred()) return configClient }(), @@ -125,7 +125,7 @@ func Test_getManifestObjs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() cm := &certManagerClient{ configClient: defaultConfigClient, @@ -590,7 +590,7 @@ func Test_certManagerClient_deleteObjs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) cm := &certManagerClient{ @@ -622,7 +622,7 @@ func Test_certManagerClient_deleteObjs(t *testing.T) { cl, err := proxy.NewClient(ctx) g.Expect(err).ToNot(HaveOccurred()) - err = cl.Get(context.Background(), client.ObjectKeyFromObject(obj), obj) + err = cl.Get(t.Context(), client.ObjectKeyFromObject(obj), obj) switch objShouldStillExist { case true: g.Expect(err).ToNot(HaveOccurred()) @@ -736,7 +736,7 @@ func Test_certManagerClient_PlanUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() proxy := test.NewFakeProxy().WithObjs(tt.objs...) fakeConfigClient := newFakeConfig() @@ -798,7 +798,7 @@ func Test_certManagerClient_EnsureLatestVersion(t *testing.T) { proxy: tt.fields.proxy, } - err := cm.EnsureLatestVersion(context.Background()) + err := cm.EnsureLatestVersion(t.Context()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/cluster/components_test.go b/cmd/clusterctl/client/cluster/components_test.go index 5acd68253448..b2c40d19e827 100644 --- a/cmd/clusterctl/client/cluster/components_test.go +++ b/cmd/clusterctl/client/cluster/components_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package cluster import ( - "context" "fmt" "testing" @@ -258,7 +257,7 @@ func Test_providerComponents_Delete(t *testing.T) { c := newComponentsClient(proxy) - err := c.Delete(context.Background(), DeleteOptions{ + err := c.Delete(t.Context(), DeleteOptions{ Provider: tt.args.provider, IncludeNamespace: tt.args.includeNamespace, IncludeCRDs: tt.args.includeCRD, @@ -271,7 +270,7 @@ func Test_providerComponents_Delete(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) - cs, err := proxy.NewClient(context.Background()) + cs, err := proxy.NewClient(t.Context()) g.Expect(err).ToNot(HaveOccurred()) for _, want := range tt.wantDiff { @@ -284,7 +283,7 @@ func Test_providerComponents_Delete(t *testing.T) { Name: want.object.Name, } - err := cs.Get(context.Background(), key, obj) + err := cs.Get(t.Context(), key, obj) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("Failed to get %v from the cluster: %v", key, err) } @@ -323,19 +322,19 @@ func Test_providerComponents_DeleteCoreProviderWebhookNamespace(t *testing.T) { } proxy := test.NewFakeProxy().WithObjs(initObjs...) - proxyClient, _ := proxy.NewClient(context.Background()) + proxyClient, _ := proxy.NewClient(t.Context()) var nsList corev1.NamespaceList // assert length before deleting - _ = proxyClient.List(context.Background(), &nsList) + _ = proxyClient.List(t.Context(), &nsList) g.Expect(nsList.Items).Should(HaveLen(1)) c := newComponentsClient(proxy) - err := c.DeleteWebhookNamespace(context.Background()) + err := c.DeleteWebhookNamespace(t.Context()) g.Expect(err).To(Not(HaveOccurred())) // assert length after deleting - _ = proxyClient.List(context.Background(), &nsList) + _ = proxyClient.List(t.Context(), &nsList) g.Expect(nsList.Items).Should(BeEmpty()) }) } @@ -450,7 +449,7 @@ func Test_providerComponents_Create(t *testing.T) { } unstructuredObjectsToCreate = append(unstructuredObjectsToCreate, *uns) } - err := c.Create(context.Background(), unstructuredObjectsToCreate) + err := c.Create(t.Context(), unstructuredObjectsToCreate) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -458,7 +457,7 @@ func Test_providerComponents_Create(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) - cs, err := proxy.NewClient(context.Background()) + cs, err := proxy.NewClient(t.Context()) g.Expect(err).ToNot(HaveOccurred()) for _, item := range tt.want { @@ -470,7 +469,7 @@ func Test_providerComponents_Create(t *testing.T) { Name: item.GetName(), } - err := cs.Get(context.Background(), key, obj) + err := cs.Get(t.Context(), key, obj) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("Failed to get %v from the cluster: %v", key, err) @@ -565,7 +564,7 @@ func Test_providerComponents_ValidateNoObjectsExist(t *testing.T) { c := newComponentsClient(proxy) - if err := c.ValidateNoObjectsExist(context.Background(), tt.provider); (err != nil) != tt.wantErr { + if err := c.ValidateNoObjectsExist(t.Context(), tt.provider); (err != nil) != tt.wantErr { t.Errorf("providerComponents.ValidateNoObjectsExist() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/cmd/clusterctl/client/cluster/crd_migration_test.go b/cmd/clusterctl/client/cluster/crd_migration_test.go index 95efc6c31488..457dede842dd 100644 --- a/cmd/clusterctl/client/cluster/crd_migration_test.go +++ b/cmd/clusterctl/client/cluster/crd_migration_test.go @@ -203,7 +203,7 @@ func Test_CRDMigrator(t *testing.T) { objs = append(objs, &tt.CRs[i]) } - c, err := test.NewFakeProxy().WithObjs(objs...).NewClient(context.Background()) + c, err := 
test.NewFakeProxy().WithObjs(objs...).NewClient(t.Context()) g.Expect(err).ToNot(HaveOccurred()) countingClient := newUpgradeCountingClient(c) @@ -211,7 +211,7 @@ func Test_CRDMigrator(t *testing.T) { Client: countingClient, } - isMigrated, err := m.run(context.Background(), tt.newCRD) + isMigrated, err := m.run(t.Context(), tt.newCRD) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -228,7 +228,7 @@ func Test_CRDMigrator(t *testing.T) { // Check storage versions has been cleaned up. currentCRD := &apiextensionsv1.CustomResourceDefinition{} - err = c.Get(context.Background(), client.ObjectKeyFromObject(tt.newCRD), currentCRD) + err = c.Get(t.Context(), client.ObjectKeyFromObject(tt.newCRD), currentCRD) g.Expect(err).ToNot(HaveOccurred()) g.Expect(currentCRD.Status.StoredVersions).To(Equal(tt.wantStoredVersions)) } diff --git a/cmd/clusterctl/client/cluster/installer_test.go b/cmd/clusterctl/client/cluster/installer_test.go index 292ba80b7024..7fced365b026 100644 --- a/cmd/clusterctl/client/cluster/installer_test.go +++ b/cmd/clusterctl/client/cluster/installer_test.go @@ -292,7 +292,7 @@ func Test_providerInstaller_Validate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() configClient, _ := config.New(ctx, "", config.InjectReader(fakeReader)) diff --git a/cmd/clusterctl/client/cluster/inventory_test.go b/cmd/clusterctl/client/cluster/inventory_test.go index aa8b26d6a99d..f795fd26fdf5 100644 --- a/cmd/clusterctl/client/cluster/inventory_test.go +++ b/cmd/clusterctl/client/cluster/inventory_test.go @@ -66,7 +66,7 @@ func Test_inventoryClient_CheckInventoryCRDs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() proxy := test.NewFakeProxy() p := newInventoryClient(proxy, fakePollImmediateWaiter, currentContractVersion) @@ -117,7 +117,7 @@ func Test_inventoryClient_List(t *testing.T) { g := NewWithT(t) p := newInventoryClient(test.NewFakeProxy().WithObjs(tt.fields.initObjs...), fakePollImmediateWaiter, currentContractVersion) - got, err := p.List(context.Background()) + got, err := p.List(t.Context()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -179,7 +179,7 @@ func Test_inventoryClient_Create(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() p := &inventoryClient{ proxy: tt.fields.proxy, @@ -336,7 +336,7 @@ func Test_CheckCAPIContract(t *testing.T) { proxy: tt.fields.proxy, currentContractVersion: currentContractVersion, } - err := p.CheckCAPIContract(context.Background(), tt.args.options...) + err := p.CheckCAPIContract(t.Context(), tt.args.options...) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -383,7 +383,7 @@ func Test_inventoryClient_CheckSingleProviderInstance(t *testing.T) { g := NewWithT(t) p := newInventoryClient(test.NewFakeProxy().WithObjs(tt.fields.initObjs...), fakePollImmediateWaiter, currentContractVersion) - err := p.CheckSingleProviderInstance(context.Background()) + err := p.CheckSingleProviderInstance(t.Context()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index cf762abe5512..1dd2ce7cac25 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package cluster import ( - "context" "fmt" "os" "path/filepath" @@ -735,7 +734,7 @@ func Test_objectMover_backupTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) @@ -816,7 +815,7 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() dir := t.TempDir() @@ -926,7 +925,7 @@ func Test_objectMover_toDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) @@ -1054,7 +1053,7 @@ func Test_objectMover_fromDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() dir := t.TempDir() @@ -1134,7 +1133,7 @@ func Test_getMoveSequence(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) @@ -1167,7 +1166,7 @@ func Test_objectMover_move_dryRun(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) @@ -1242,7 +1241,7 @@ func Test_objectMover_move(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) @@ -1319,7 +1318,7 @@ func Test_objectMover_move_with_Mutator(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() toNamespace := "foobar" updateKnownKinds := map[string][][]string{ @@ -1638,7 +1637,7 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraphWithObjs(tt.fields.objs) @@ -1731,7 +1730,7 @@ func Test_objectsMoverService_checkTargetProviders(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() o := &objectMover{ fromProviderInventory: newInventoryClient(tt.fields.fromProxy, nil, currentContractVersion), @@ -1786,7 +1785,7 @@ func Test_objectMoverService_ensureNamespace(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() mover := objectMover{ fromProxy: test.NewFakeProxy(), @@ -1896,7 +1895,7 @@ func Test_objectMoverService_ensureNamespaces(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() graph := getObjectGraphWithObjs(tt.fields.objs) @@ -2021,13 +2020,13 @@ func Test_createTargetObject(t *testing.T) { nsKey := client.ObjectKey{ Name: "ns1", } - g.Expect(toClient.Get(context.Background(), nsKey, ns)).To(Succeed()) + g.Expect(toClient.Get(t.Context(), nsKey, ns)).To(Succeed()) c := &clusterv1.Cluster{} key := client.ObjectKey{ Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(t.Context(), key, c)).ToNot(HaveOccurred()) g.Expect(c.OwnerReferences).To(HaveLen(1)) g.Expect(c.OwnerReferences[0].Controller).To(Equal(ptr.To(true))) }, @@ -2067,7 +2066,7 @@ func Test_createTargetObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(t.Context(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).To(BeEmpty()) }, }, @@ -2113,7 +2112,7 @@ func Test_createTargetObject(t *testing.T) { Namespace: "mutatedns1", Name: "foo", } - g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(t.Context(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).To(BeEmpty()) }, }, @@ -2149,7 +2148,7 @@ func Test_createTargetObject(t *testing.T) { key := client.ObjectKey{ Name: "foo", } - g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(t.Context(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).ToNot(BeEmpty()) }, }, @@ -2189,7 +2188,7 @@ func Test_createTargetObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(t.Context(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).ToNot(BeEmpty()) }, }, @@ -2199,7 +2198,7 @@ func Test_createTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() mover := objectMover{ fromProxy: tt.args.fromProxy, @@ -2250,7 +2249,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(t.Context(), key, c))).To(BeTrue()) }, }, { @@ -2279,7 +2278,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(t.Context(), key, c))).To(BeTrue()) }, }, { @@ -2307,7 +2306,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, 
c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(t.Context(), key, c))).To(BeTrue()) }, }, { @@ -2337,7 +2336,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(t.Context(), key, c))).To(BeTrue()) }, }, } @@ -2346,7 +2345,7 @@ func Test_deleteSourceObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() mover := objectMover{ fromProxy: tt.args.fromProxy, @@ -2396,7 +2395,7 @@ func TestWaitReadyForMove(t *testing.T) { clusterNamespace := "ns1" objs := test.NewFakeCluster(clusterNamespace, clusterName).Objs() - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(objs) diff --git a/cmd/clusterctl/client/cluster/objectgraph_test.go b/cmd/clusterctl/client/cluster/objectgraph_test.go index 11418edaaf3b..2a3b68291eef 100644 --- a/cmd/clusterctl/client/cluster/objectgraph_test.go +++ b/cmd/clusterctl/client/cluster/objectgraph_test.go @@ -17,7 +17,6 @@ limitations under the License. package cluster import ( - "context" "fmt" "sort" "testing" @@ -224,7 +223,7 @@ func TestObjectGraph_getDiscoveryTypeMetaList(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() graph := newObjectGraph(tt.fields.proxy, nil) err := graph.getDiscoveryTypes(ctx) @@ -1789,7 +1788,7 @@ func TestObjectGraph_Discovery(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound to a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.args.objs) @@ -2185,7 +2184,7 @@ func TestObjectGraph_DiscoveryByNamespace(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create an objectGraph bound to a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraphWithObjs(tt.args.objs) diff --git a/cmd/clusterctl/client/cluster/template_test.go b/cmd/clusterctl/client/cluster/template_test.go index c1042fafa9c5..b366d54278ff 100644 --- a/cmd/clusterctl/client/cluster/template_test.go +++ b/cmd/clusterctl/client/cluster/template_test.go @@ -47,7 +47,7 @@ kind: Machine` func Test_templateClient_GetFromConfigMap(t *testing.T) { g := NewWithT(t) - configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(t.Context(), "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) configMap := &corev1.ConfigMap{ @@ -135,7 +135,7 @@ func Test_templateClient_GetFromConfigMap(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() processor := yaml.NewSimpleProcessor() tc := newTemplateClient(TemplateClientInput{tt.fields.proxy, tt.fields.configClient, processor}) @@ -165,7 +165,7 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) { client, mux, teardown := test.NewFakeGitHub() defer teardown() - configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(t.Context(), "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, _ *http.Request) { @@ -210,7 +210,7 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() c := &templateClient{ configClient: configClient, @@ -260,7 +260,7 @@ func Test_templateClient_getRawUrlFileContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() c := newTemplateClient(TemplateClientInput{}) got, err := c.getRawURLFileContent(ctx, tt.args.rURL) @@ -333,7 +333,7 @@ func Test_templateClient_GetFromURL(t *testing.T) { tmpDir := t.TempDir() - configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(t.Context(), "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) fakeGithubClient, mux, teardown := test.NewFakeGitHub() @@ -482,7 +482,7 @@ func Test_templateClient_GetFromURL(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() gitHubClientFactory := func(context.Context, config.VariablesClient) (*github.Client, error) { return fakeGithubClient, nil diff --git a/cmd/clusterctl/client/cluster/topology_test.go b/cmd/clusterctl/client/cluster/topology_test.go new file mode 100644 index 000000000000..3af795f93e0c --- /dev/null +++ b/cmd/clusterctl/client/cluster/topology_test.go @@ -0,0 +1,466 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + _ "embed" + "fmt" + "strings" + "testing" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" + utilyaml "sigs.k8s.io/cluster-api/util/yaml" +) + +var ( + //go:embed assets/topology-test/new-clusterclass-and-cluster.yaml + newClusterClassAndClusterYAML []byte + + //go:embed assets/topology-test/mock-CRDs.yaml + mockCRDsYAML []byte + + //go:embed assets/topology-test/my-cluster-class.yaml + existingMyClusterClassYAML []byte + + //go:embed assets/topology-test/existing-my-cluster.yaml + existingMyClusterYAML []byte + + //go:embed assets/topology-test/existing-my-second-cluster.yaml + existingMySecondClusterYAML []byte + + // modifiedClusterYAML changes the control plane replicas from 1 to 3. + //go:embed assets/topology-test/modified-my-cluster.yaml + modifiedMyClusterYAML []byte + + // modifiedDockerMachineTemplateYAML adds metadata to the docker machine used by the control plane template.. + //go:embed assets/topology-test/modified-CP-dockermachinetemplate.yaml + modifiedDockerMachineTemplateYAML []byte + + // modifiedDockerMachinePoolTemplateYAML adds metadata to the docker machine pool used by the control plane template.. + //go:embed assets/topology-test/modified-CP-dockermachinepooltemplate.yaml + modifiedDockerMachinePoolTemplateYAML []byte + + //go:embed assets/topology-test/objects-in-different-namespaces.yaml + objsInDifferentNamespacesYAML []byte +) + +func Test_topologyClient_Plan(t *testing.T) { + type args struct { + in *TopologyPlanInput + } + type item struct { + kind string + namespace string + namePrefix string + } + type out struct { + affectedClusters []client.ObjectKey + affectedClusterClasses []client.ObjectKey + reconciledCluster *client.ObjectKey + created []item + modified []item + deleted []item + } + tests := []struct { + name string + existingObjects []*unstructured.Unstructured + args args + want out + wantErr bool + }{ + { + name: "Input with new ClusterClass and new Cluster", + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(newClusterClassAndClusterYAML), + }, + }, + want: out{ + created: []item{ + {kind: "DockerCluster", namespace: "default", namePrefix: "my-cluster-"}, + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-md-0-"}, + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-md-1-"}, + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-"}, + {kind: "DockerMachinePool", namespace: "default", namePrefix: "my-cluster-mp-0-"}, + {kind: "DockerMachinePool", namespace: "default", namePrefix: "my-cluster-mp-1-"}, + {kind: "KubeadmConfigTemplate", namespace: "default", namePrefix: "my-cluster-md-0-"}, + {kind: "KubeadmConfigTemplate", namespace: "default", namePrefix: "my-cluster-md-1-"}, + {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, + {kind: "MachineDeployment", namespace: "default", namePrefix: "my-cluster-md-0-"}, + {kind: "MachineDeployment", namespace: "default", namePrefix: "my-cluster-md-1-"}, + {kind: "MachinePool", namespace: "default", namePrefix: "my-cluster-mp-0-"}, + {kind: "MachinePool", namespace: "default", namePrefix: "my-cluster-mp-1-"}, + }, + modified: []item{ + {kind: "Cluster", 
namespace: "default", namePrefix: "my-cluster"}, + }, + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + return []client.ObjectKey{cluster} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Modifying an existing Cluster", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedMyClusterYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + return []client.ObjectKey{cluster} + }(), + affectedClusterClasses: []client.ObjectKey{}, + modified: []item{ + {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachineTemplate. Template used by Control Plane of an existing Cluster.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachineTemplateYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + return []client.ObjectKey{cluster} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + modified: []item{ + {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, + }, + created: []item{ + // Modifying the DockerClusterTemplate will result in template rotation. A new template will be created + // and used by KCP. + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachineTemplate. Affects multiple clusters. Target Cluster not specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachineTemplateYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + modified: []item{}, + created: []item{}, + reconciledCluster: nil, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachinePoolTemplate. Affects multiple clusters. 
Target Cluster not specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachinePoolTemplateYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + modified: []item{}, + created: []item{}, + reconciledCluster: nil, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachineTemplate. Affects multiple clusters. Target Cluster specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachineTemplateYAML), + TargetClusterName: "my-cluster", + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + modified: []item{ + {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, + }, + created: []item{ + // Modifying the DockerClusterTemplate will result in template rotation. A new template will be created + // and used by KCP. + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachinePoolTemplate. Affects multiple clusters. 
Target Cluster specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachinePoolTemplateYAML), + TargetClusterName: "my-cluster", + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + created: []item{ + {kind: "DockerMachinePool", namespace: "default", namePrefix: "my-cluster-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Input with objects in different namespaces should return error", + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(objsInDifferentNamespacesYAML), + }, + }, + wantErr: true, + }, + { + name: "Input with TargetNamespace different from objects in input should return error", + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(newClusterClassAndClusterYAML), + TargetNamespace: "different-namespace", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := t.Context() + + existingObjects := []client.Object{} + for _, o := range tt.existingObjects { + existingObjects = append(existingObjects, o) + } + proxy := test.NewFakeProxy().WithClusterAvailable(true).WithObjs(fakeCAPISetupObjects()...).WithObjs(existingObjects...) + inventoryClient := newInventoryClient(proxy, nil, currentContractVersion) + tc := newTopologyClient( + proxy, + inventoryClient, + ) + + res, err := tc.Plan(ctx, tt.args.in) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + // The plan should function should not return any error. + g.Expect(err).ToNot(HaveOccurred()) + + // Check affected ClusterClasses. + g.Expect(res.ClusterClasses).To(HaveLen(len(tt.want.affectedClusterClasses))) + for _, cc := range tt.want.affectedClusterClasses { + g.Expect(res.ClusterClasses).To(ContainElement(cc)) + } + + // Check affected Clusters. + g.Expect(res.Clusters).To(HaveLen(len(tt.want.affectedClusters))) + for _, cluster := range tt.want.affectedClusters { + g.Expect(res.Clusters).To(ContainElement(cluster)) + } + + // Check the reconciled cluster. + if tt.want.reconciledCluster == nil { + g.Expect(res.ReconciledCluster).To(BeNil()) + } else { + g.Expect(res.ReconciledCluster).NotTo(BeNil()) + g.Expect(*res.ReconciledCluster).To(BeComparableTo(*tt.want.reconciledCluster)) + } + + // Check the created objects. + for _, created := range tt.want.created { + g.Expect(res.Created).To(ContainElement(MatchTopologyPlanOutputItem(created.kind, created.namespace, created.namePrefix))) + } + + // Check the modified objects. + actualModifiedObjs := []*unstructured.Unstructured{} + for _, m := range res.Modified { + actualModifiedObjs = append(actualModifiedObjs, m.After) + } + for _, modified := range tt.want.modified { + g.Expect(actualModifiedObjs).To(ContainElement(MatchTopologyPlanOutputItem(modified.kind, modified.namespace, modified.namePrefix))) + } + + // Check the deleted objects. 
+ for _, deleted := range tt.want.deleted { + g.Expect(res.Deleted).To(ContainElement(MatchTopologyPlanOutputItem(deleted.kind, deleted.namespace, deleted.namePrefix))) + } + }) + } +} + +func fakeCAPISetupObjects() []client.Object { + return []client.Object{ + &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "clusters.cluster.x-k8s.io"}, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: currentContractVersion, + Storage: true, + }, + }, + }, + }, + } +} + +func MatchTopologyPlanOutputItem(kind, namespace, namePrefix string) types.GomegaMatcher { + return &topologyPlanOutputItemMatcher{kind, namespace, namePrefix} +} + +type topologyPlanOutputItemMatcher struct { + kind string + namespace string + namePrefix string +} + +func (m *topologyPlanOutputItemMatcher) Match(actual interface{}) (bool, error) { + obj := actual.(*unstructured.Unstructured) + if obj.GetKind() != m.kind { + return false, nil + } + if obj.GetNamespace() != m.namespace { + return false, nil + } + if !strings.HasPrefix(obj.GetName(), m.namePrefix) { + return false, nil + } + return true, nil +} + +func (m *topologyPlanOutputItemMatcher) FailureMessage(_ interface{}) string { + return fmt.Sprintf("Expected item Kind=%s, Namespace=%s, Name(prefix)=%s to be present", m.kind, m.namespace, m.namePrefix) +} + +func (m *topologyPlanOutputItemMatcher) NegatedFailureMessage(_ interface{}) string { + return fmt.Sprintf("Expected item Kind=%s, Namespace=%s, Name(prefix)=%s not to be present", m.kind, m.namespace, m.namePrefix) +} + +func convertToPtrSlice(objs []unstructured.Unstructured) []*unstructured.Unstructured { + res := []*unstructured.Unstructured{} + for i := range objs { + res = append(res, &objs[i]) + } + return res +} + +func mustToUnstructured(rawyamls ...[]byte) []*unstructured.Unstructured { + objects := []unstructured.Unstructured{} + for _, raw := range rawyamls { + objs, err := utilyaml.ToUnstructured(raw) + if err != nil { + panic(err) + } + objects = append(objects, objs...) 
+ } + return convertToPtrSlice(objects) +} diff --git a/cmd/clusterctl/client/cluster/upgrader_info_test.go b/cmd/clusterctl/client/cluster/upgrader_info_test.go index d13138ae3111..4b18724b371f 100644 --- a/cmd/clusterctl/client/cluster/upgrader_info_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_info_test.go @@ -230,7 +230,7 @@ func Test_providerUpgrader_getUpgradeInfo(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New(context.Background(), "", config.InjectReader(tt.fields.reader)) + configClient, _ := config.New(t.Context(), "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, @@ -238,7 +238,7 @@ func Test_providerUpgrader_getUpgradeInfo(t *testing.T) { return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repo)) }, } - got, err := u.getUpgradeInfo(context.Background(), tt.args.provider) + got, err := u.getUpgradeInfo(t.Context(), tt.args.provider) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { diff --git a/cmd/clusterctl/client/cluster/upgrader_test.go b/cmd/clusterctl/client/cluster/upgrader_test.go index 2eb73428a463..a87ac2419f70 100644 --- a/cmd/clusterctl/client/cluster/upgrader_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_test.go @@ -414,7 +414,7 @@ func Test_providerUpgrader_Plan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) @@ -985,7 +985,7 @@ func Test_providerUpgrader_createCustomPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) @@ -1106,7 +1106,7 @@ func Test_providerUpgrader_ApplyPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) @@ -1360,7 +1360,7 @@ func Test_providerUpgrader_ApplyCustomPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) diff --git a/cmd/clusterctl/client/cluster/workload_cluster_test.go b/cmd/clusterctl/client/cluster/workload_cluster_test.go index ff07e711d344..b3e50dbb8537 100644 --- a/cmd/clusterctl/client/cluster/workload_cluster_test.go +++ b/cmd/clusterctl/client/cluster/workload_cluster_test.go @@ -17,7 +17,6 @@ limitations under the License. package cluster import ( - "context" "testing" . "github.com/onsi/gomega" @@ -85,7 +84,7 @@ users: t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() wc := newWorkloadCluster(tt.proxy) data, err := wc.GetKubeconfig(ctx, "test1", "test") diff --git a/cmd/clusterctl/client/clusterclass_test.go b/cmd/clusterctl/client/clusterclass_test.go index 8cc8b176b6ca..731558ce19a8 100644 --- a/cmd/clusterctl/client/clusterclass_test.go +++ b/cmd/clusterctl/client/clusterclass_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package client import ( - "context" "fmt" "testing" @@ -68,7 +67,7 @@ func TestClusterClassExists(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() config := newFakeConfig(ctx) client := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config).WithObjs(tt.objs...) @@ -178,7 +177,7 @@ func TestAddClusterClassIfMissing(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() config1 := newFakeConfig(ctx).WithProvider(infraProviderConfig) repository1 := newFakeRepository(ctx, infraProviderConfig, config1). diff --git a/cmd/clusterctl/client/config/reader_memory_test.go b/cmd/clusterctl/client/config/reader_memory_test.go index 6cd907c0f9cc..3ac5bb7520ee 100644 --- a/cmd/clusterctl/client/config/reader_memory_test.go +++ b/cmd/clusterctl/client/config/reader_memory_test.go @@ -17,7 +17,6 @@ limitations under the License. package config import ( - "context" "testing" . "github.com/onsi/gomega" @@ -54,7 +53,7 @@ func TestMemoryReader(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() f := NewMemoryReader() g.Expect(f.Init(ctx, "")).To(Succeed()) diff --git a/cmd/clusterctl/client/config/reader_viper_test.go b/cmd/clusterctl/client/config/reader_viper_test.go index e6613be1e9de..4c5f46ee6444 100644 --- a/cmd/clusterctl/client/config/reader_viper_test.go +++ b/cmd/clusterctl/client/config/reader_viper_test.go @@ -17,7 +17,6 @@ limitations under the License. package config import ( - "context" "fmt" "net/http" "net/http/httptest" @@ -106,7 +105,7 @@ func Test_viperReader_Init(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gg := NewWithT(t) - ctx := context.Background() + ctx := t.Context() v, _ := newViperReader(injectConfigPaths(tt.configDirs)) if tt.expectErr { @@ -166,7 +165,7 @@ func Test_viperReader_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - ctx := context.Background() + ctx := t.Context() v, _ := newViperReader(injectConfigPaths([]string{dir})) @@ -187,7 +186,7 @@ func Test_viperReader_Get(t *testing.T) { func Test_viperReader_GetWithoutDefaultConfig(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() dir := t.TempDir() @@ -235,7 +234,7 @@ func Test_viperReader_Set(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - ctx := context.Background() + ctx := t.Context() v := &viperReader{} diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go index 19b68d6d172d..2f3b6b5e0671 100644 --- a/cmd/clusterctl/client/config_test.go +++ b/cmd/clusterctl/client/config_test.go @@ -51,7 +51,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { { name: "Returns default providers", field: field{ - client: newFakeClient(context.Background(), newFakeConfig(context.Background())), + client: newFakeClient(t.Context(), newFakeConfig(t.Context())), }, // note: these will be sorted by name by the Providers() call, so be sure they are in alphabetical order here too wantProviders: []string{ @@ -117,7 +117,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { { name: "Returns default providers and custom providers if defined", field: field{ - client: newFakeClient(context.Background(), newFakeConfig(context.Background()).WithProvider(customProviderConfig)), + client: newFakeClient(t.Context(), 
newFakeConfig(t.Context()).WithProvider(customProviderConfig)), }, // note: these will be sorted by name by the Providers() call, so be sure they are in alphabetical order here too wantProviders: []string{ @@ -204,7 +204,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { } func Test_clusterctlClient_GetProviderComponents(t *testing.T) { - ctx := context.Background() + ctx := t.Context() config1 := newFakeConfig(ctx). WithProvider(capiProviderConfig) @@ -256,7 +256,7 @@ func Test_clusterctlClient_GetProviderComponents(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() options := ComponentsOptions{ TargetNamespace: tt.args.targetNameSpace, @@ -277,7 +277,7 @@ func Test_clusterctlClient_GetProviderComponents(t *testing.T) { func Test_getComponentsByName_withEmptyVariables(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() // Create a fake config with a provider named P1 and a variable named foo. repository1Config := config.NewProvider("p1", "url", clusterctlv1.InfrastructureProviderType) @@ -452,7 +452,7 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() config := newFakeConfig(ctx). WithVar("KUBERNETES_VERSION", "v3.4.5") // with this line we are simulating an env var @@ -477,7 +477,7 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { } func Test_clusterctlClient_templateOptionsToVariables_withExistingMachineCountVariables(t *testing.T) { - ctx := context.Background() + ctx := t.Context() configClient := newFakeConfig(ctx). WithVar("CONTROL_PLANE_MACHINE_COUNT", "3"). @@ -518,7 +518,7 @@ func Test_clusterctlClient_templateOptionsToVariables_withExistingMachineCountVa func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") @@ -700,7 +700,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { func Test_clusterctlClient_GetClusterTemplate_withClusterClass(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() rawTemplate := mangedTopologyTemplateYAML("ns4", "${CLUSTER_NAME}", "dev") rawClusterClassTemplate := clusterClassYAML("ns4", "dev") @@ -914,7 +914,7 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient { func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") - ctx := context.Background() + ctx := t.Context() config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) diff --git a/cmd/clusterctl/client/delete_test.go b/cmd/clusterctl/client/delete_test.go index a4e53b9015f3..61d47b0397cf 100644 --- a/cmd/clusterctl/client/delete_test.go +++ b/cmd/clusterctl/client/delete_test.go @@ -189,7 +189,7 @@ func Test_clusterctlClient_Delete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.Delete(ctx, tt.args.options) if tt.wantErr { diff --git a/cmd/clusterctl/client/get_kubeconfig_test.go b/cmd/clusterctl/client/get_kubeconfig_test.go index 19f367fe6dc1..ab616524468d 100644 --- a/cmd/clusterctl/client/get_kubeconfig_test.go +++ b/cmd/clusterctl/client/get_kubeconfig_test.go @@ -17,7 +17,6 @@ limitations under the License. package client import ( - "context" "testing" . 
"github.com/onsi/gomega" @@ -27,7 +26,7 @@ import ( ) func Test_clusterctlClient_GetKubeconfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() configClient := newFakeConfig(ctx) kubeconfig := cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"} diff --git a/cmd/clusterctl/client/move_test.go b/cmd/clusterctl/client/move_test.go index 42998329ae7c..7801551df5b2 100644 --- a/cmd/clusterctl/client/move_test.go +++ b/cmd/clusterctl/client/move_test.go @@ -128,7 +128,7 @@ func Test_clusterctlClient_Move(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { @@ -189,7 +189,7 @@ func Test_clusterctlClient_ToDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { @@ -250,7 +250,7 @@ func Test_clusterctlClient_FromDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { diff --git a/cmd/clusterctl/client/repository/client_test.go b/cmd/clusterctl/client/repository/client_test.go index c67a9ccbbc71..08540db17b1a 100644 --- a/cmd/clusterctl/client/repository/client_test.go +++ b/cmd/clusterctl/client/repository/client_test.go @@ -17,7 +17,6 @@ limitations under the License. package repository import ( - "context" "testing" . "github.com/onsi/gomega" @@ -31,7 +30,7 @@ import ( func Test_newRepositoryClient_LocalFileSystemRepository(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() tmpDir := t.TempDir() @@ -82,7 +81,7 @@ func Test_newRepositoryClient_LocalFileSystemRepository(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - ctx := context.Background() + ctx := t.Context() repoClient, err := newRepositoryClient(ctx, tt.fields.provider, configClient) gs.Expect(err).ToNot(HaveOccurred()) @@ -130,7 +129,7 @@ func Test_newRepositoryClient_YamlProcessor(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() configProvider := config.NewProvider("fakeProvider", "", clusterctlv1.CoreProviderType) configClient, err := config.New(ctx, "", config.InjectReader(test.NewFakeReader())) diff --git a/cmd/clusterctl/client/repository/clusterclass_client_test.go b/cmd/clusterctl/client/repository/clusterclass_client_test.go index 4ba742ce5626..c538a7a6ca38 100644 --- a/cmd/clusterctl/client/repository/clusterclass_client_test.go +++ b/cmd/clusterctl/client/repository/clusterclass_client_test.go @@ -17,7 +17,6 @@ limitations under the License. package repository import ( - "context" "fmt" "testing" @@ -162,7 +161,7 @@ func Test_ClusterClassClient_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() f := newClusterClassClient( ClusterClassClientInput{ diff --git a/cmd/clusterctl/client/repository/components_client_test.go b/cmd/clusterctl/client/repository/components_client_test.go index 1c70f1444038..922bd5d56513 100644 --- a/cmd/clusterctl/client/repository/components_client_test.go +++ b/cmd/clusterctl/client/repository/components_client_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package repository import ( - "context" "fmt" "testing" @@ -67,7 +66,7 @@ func Test_componentsClient_Get(t *testing.T) { p1 := config.NewProvider("p1", "", clusterctlv1.BootstrapProviderType) - configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithVar(variableName, variableValue))) + configClient, err := config.New(t.Context(), "", config.InjectReader(test.NewFakeReader().WithVar(variableName, variableValue))) g.Expect(err).ToNot(HaveOccurred()) type fields struct { @@ -260,7 +259,7 @@ func Test_componentsClient_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - ctx := context.Background() + ctx := t.Context() options := ComponentsOptions{ Version: tt.args.version, diff --git a/cmd/clusterctl/client/repository/metadata_client_test.go b/cmd/clusterctl/client/repository/metadata_client_test.go index e194784e47f6..848915892fe3 100644 --- a/cmd/clusterctl/client/repository/metadata_client_test.go +++ b/cmd/clusterctl/client/repository/metadata_client_test.go @@ -17,7 +17,6 @@ limitations under the License. package repository import ( - "context" "testing" . "github.com/onsi/gomega" @@ -124,7 +123,7 @@ func Test_metadataClient_Get(t *testing.T) { version: tt.fields.version, repository: tt.fields.repository, } - got, err := f.Get(context.Background()) + got, err := f.Get(t.Context()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go index 07eed06cc85e..9041de040fef 100644 --- a/cmd/clusterctl/client/repository/repository_github_test.go +++ b/cmd/clusterctl/client/repository/repository_github_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package repository import ( - "context" "fmt" "net/http" "strings" @@ -117,7 +116,7 @@ func Test_gitHubRepository_GetVersions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() resetCaches() @@ -219,7 +218,7 @@ func Test_githubRepository_newGitHubRepository(t *testing.T) { g := NewWithT(t) resetCaches() - gitHub, err := NewGitHubRepository(context.Background(), tt.field.providerConfig, tt.field.variableClient) + gitHub, err := NewGitHubRepository(t.Context(), tt.field.providerConfig, tt.field.variableClient) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -321,10 +320,10 @@ func Test_githubRepository_getFile(t *testing.T) { g := NewWithT(t) resetCaches() - gitHub, err := NewGitHubRepository(context.Background(), providerConfig, configVariablesClient, injectGithubClient(client)) + gitHub, err := NewGitHubRepository(t.Context(), providerConfig, configVariablesClient, injectGithubClient(client)) g.Expect(err).ToNot(HaveOccurred()) - got, err := gitHub.GetFile(context.Background(), tt.release, tt.fileName) + got, err := gitHub.GetFile(t.Context(), tt.release, tt.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -398,7 +397,7 @@ func Test_gitHubRepository_getVersions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() resetCaches() @@ -526,10 +525,10 @@ func Test_gitHubRepository_getLatestContractRelease(t *testing.T) { g := NewWithT(t) resetCaches() - gRepo, err := NewGitHubRepository(context.Background(), tt.field.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) + gRepo, err := NewGitHubRepository(t.Context(), tt.field.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) g.Expect(err).ToNot(HaveOccurred()) - got, err := latestContractRelease(context.Background(), gRepo, tt.contract) + got, err := latestContractRelease(t.Context(), gRepo, tt.contract) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -637,7 +636,7 @@ func Test_gitHubRepository_getLatestRelease(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() resetCaches() @@ -750,7 +749,7 @@ func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() resetCaches() @@ -814,7 +813,7 @@ func Test_gitHubRepository_getReleaseByTag(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() resetCaches() @@ -954,10 +953,10 @@ func Test_gitHubRepository_downloadFilesFromRelease(t *testing.T) { g := NewWithT(t) resetCaches() - gRepo, err := NewGitHubRepository(context.Background(), tt.providerConfig, configVariablesClient, injectGithubClient(client)) + gRepo, err := NewGitHubRepository(t.Context(), tt.providerConfig, configVariablesClient, injectGithubClient(client)) g.Expect(err).ToNot(HaveOccurred()) - got, err := gRepo.(*gitHubRepository).downloadFilesFromRelease(context.Background(), tt.args.release, tt.args.fileName) + got, err := gRepo.(*gitHubRepository).downloadFilesFromRelease(t.Context(), tt.args.release, tt.args.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1053,7 +1052,7 @@ func Test_gitHubRepository_releaseNotFound(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := 
context.Background() + ctx := t.Context() configVariablesClient := test.NewFakeVariableClient() diff --git a/cmd/clusterctl/client/repository/repository_gitlab_test.go b/cmd/clusterctl/client/repository/repository_gitlab_test.go index 5e5137d8d1f3..9b63966ad446 100644 --- a/cmd/clusterctl/client/repository/repository_gitlab_test.go +++ b/cmd/clusterctl/client/repository/repository_gitlab_test.go @@ -17,7 +17,6 @@ limitations under the License. package repository import ( - "context" "fmt" "net/http" "net/http/httptest" @@ -131,7 +130,7 @@ func Test_gitLabRepository_newGitLabRepository(t *testing.T) { g := NewWithT(t) resetCaches() - gitLab, err := NewGitLabRepository(context.Background(), tt.field.providerConfig, tt.field.variableClient) + gitLab, err := NewGitLabRepository(t.Context(), tt.field.providerConfig, tt.field.variableClient) if tt.wantedErr != "" { g.Expect(err).To(MatchError(tt.wantedErr)) return @@ -193,11 +192,11 @@ func Test_gitLabRepository_getFile(t *testing.T) { g := NewWithT(t) resetCaches() - gitLab, err := NewGitLabRepository(context.Background(), providerConfig, configVariablesClient) + gitLab, err := NewGitLabRepository(t.Context(), providerConfig, configVariablesClient) gitLab.(*gitLabRepository).authenticatingHTTPClient = client g.Expect(err).ToNot(HaveOccurred()) - got, err := gitLab.GetFile(context.Background(), tt.version, tt.fileName) + got, err := gitLab.GetFile(t.Context(), tt.version, tt.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/repository/repository_local_test.go b/cmd/clusterctl/client/repository/repository_local_test.go index 1ca537f13475..cb7adff7d7ae 100644 --- a/cmd/clusterctl/client/repository/repository_local_test.go +++ b/cmd/clusterctl/client/repository/repository_local_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package repository import ( - "context" "os" "path/filepath" "testing" @@ -113,7 +112,7 @@ func Test_localRepository_newLocalRepository(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := newLocalRepository(context.Background(), tt.fields.provider, tt.fields.configVariablesClient) + got, err := newLocalRepository(t.Context(), tt.fields.provider, tt.fields.configVariablesClient) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -160,7 +159,7 @@ func Test_localRepository_newLocalRepository_Latest(t *testing.T) { p2URLLatestAbs := filepath.Join(tmpDir, p2URLLatest) p2 := config.NewProvider("foo", p2URLLatestAbs, clusterctlv1.BootstrapProviderType) - got, err := newLocalRepository(context.Background(), p2, test.NewFakeVariableClient()) + got, err := newLocalRepository(t.Context(), p2, test.NewFakeVariableClient()) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.basepath).To(Equal(tmpDir)) @@ -291,10 +290,10 @@ func Test_localRepository_GetFile(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - r, err := newLocalRepository(context.Background(), tt.fields.provider, tt.fields.configVariablesClient) + r, err := newLocalRepository(t.Context(), tt.fields.provider, tt.fields.configVariablesClient) g.Expect(err).ToNot(HaveOccurred()) - got, err := r.GetFile(context.Background(), tt.args.version, tt.args.fileName) + got, err := r.GetFile(t.Context(), tt.args.version, tt.args.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -366,7 +365,7 @@ func Test_localRepository_GetVersions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() r, err := newLocalRepository(ctx, tt.fields.provider, tt.fields.configVariablesClient) g.Expect(err).ToNot(HaveOccurred()) diff --git a/cmd/clusterctl/client/repository/repository_memory_test.go b/cmd/clusterctl/client/repository/repository_memory_test.go index baa7828262eb..79dad0c4783c 100644 --- a/cmd/clusterctl/client/repository/repository_memory_test.go +++ b/cmd/clusterctl/client/repository/repository_memory_test.go @@ -17,7 +17,6 @@ limitations under the License. package repository import ( - "context" "testing" . "github.com/onsi/gomega" @@ -125,7 +124,7 @@ releaseSeries: t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() r := tt.repo g.Expect(r.RootPath()).To(Equal("")) diff --git a/cmd/clusterctl/client/repository/template_client_test.go b/cmd/clusterctl/client/repository/template_client_test.go index 34bd6fa2f87a..e3897c694279 100644 --- a/cmd/clusterctl/client/repository/template_client_test.go +++ b/cmd/clusterctl/client/repository/template_client_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package repository import ( - "context" "fmt" "testing" @@ -194,7 +193,7 @@ func Test_templates_Get(t *testing.T) { processor: tt.fields.processor, }, ) - got, err := f.Get(context.Background(), tt.args.flavor, tt.args.targetNamespace, tt.args.listVariablesOnly) + got, err := f.Get(t.Context(), tt.args.flavor, tt.args.targetNamespace, tt.args.listVariablesOnly) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/rollout_test.go b/cmd/clusterctl/client/rollout_test.go index 232c83d9de23..9cd50e31d4b7 100644 --- a/cmd/clusterctl/client/rollout_test.go +++ b/cmd/clusterctl/client/rollout_test.go @@ -173,7 +173,7 @@ func Test_clusterctlClient_RolloutRestart(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.RolloutRestart(ctx, tt.args.options) if tt.wantErr { @@ -258,7 +258,7 @@ func Test_clusterctlClient_RolloutPause(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.RolloutPause(ctx, tt.args.options) if tt.wantErr { @@ -343,7 +343,7 @@ func Test_clusterctlClient_RolloutResume(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.RolloutResume(ctx, tt.args.options) if tt.wantErr { diff --git a/cmd/clusterctl/client/tree/discovery_test.go b/cmd/clusterctl/client/tree/discovery_test.go index 25600870bfe6..b6d3a37d775f 100644 --- a/cmd/clusterctl/client/tree/discovery_test.go +++ b/cmd/clusterctl/client/tree/discovery_test.go @@ -17,7 +17,6 @@ limitations under the License. package tree import ( - "context" "strings" "testing" @@ -845,11 +844,11 @@ func Test_Discovery(t *testing.T) { for _, crd := range test.FakeCRDList() { tt.args.objs = append(tt.args.objs, crd) } - client, err := test.NewFakeProxy().WithObjs(tt.args.objs...).NewClient(context.Background()) + client, err := test.NewFakeProxy().WithObjs(tt.args.objs...).NewClient(t.Context()) g.Expect(client).ToNot(BeNil()) g.Expect(err).ToNot(HaveOccurred()) - tree, err := Discovery(context.TODO(), client, "ns1", "cluster1", tt.args.discoverOptions) + tree, err := Discovery(t.Context(), client, "ns1", "cluster1", tt.args.discoverOptions) g.Expect(tree).ToNot(BeNil()) g.Expect(err).ToNot(HaveOccurred()) diff --git a/cmd/clusterctl/client/upgrade_test.go b/cmd/clusterctl/client/upgrade_test.go index d5df8f6e1862..0ec284f06e20 100644 --- a/cmd/clusterctl/client/upgrade_test.go +++ b/cmd/clusterctl/client/upgrade_test.go @@ -35,7 +35,7 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { // create a fake config with a provider named P1 and a variable named var repository1Config := config.NewProvider("p1", "url", clusterctlv1.CoreProviderType) - ctx := context.Background() + ctx := t.Context() config1 := newFakeConfig(ctx). WithVar("var", "value"). 
@@ -78,7 +78,7 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() options := PlanUpgradeOptions{ Kubeconfig: Kubeconfig{Path: "cluster1"}, @@ -138,7 +138,7 @@ func Test_clusterctlClient_PlanUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() _, err := tt.fields.client.PlanUpgrade(ctx, tt.args.options) if tt.wantErr { @@ -269,7 +269,7 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() err := tt.fields.client.ApplyUpgrade(ctx, tt.args.options) if tt.wantErr { @@ -390,7 +390,7 @@ func Test_parseUpgradeItem(t *testing.T) { provider string } - ctx := context.Background() + ctx := t.Context() configClient := newFakeConfig(ctx) clusterClient := newFakeCluster(cluster.Kubeconfig{Path: "cluster1"}, configClient) @@ -485,7 +485,7 @@ func Test_parseUpgradeItem(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() got, err := parseUpgradeItem(ctx, clusterClient, tt.args.provider, clusterctlv1.CoreProviderType) if tt.wantErr { diff --git a/cmd/clusterctl/cmd/version_checker_test.go b/cmd/clusterctl/cmd/version_checker_test.go index c52e6bfdb41a..4370747b4e8f 100644 --- a/cmd/clusterctl/cmd/version_checker_test.go +++ b/cmd/clusterctl/cmd/version_checker_test.go @@ -17,7 +17,6 @@ limitations under the License. package cmd import ( - "context" "fmt" "net/http" "os" @@ -36,7 +35,7 @@ import ( func TestVersionChecker_newVersionChecker(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) @@ -237,7 +236,7 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -275,7 +274,7 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 func TestVersionChecker_WriteStateFile(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() fakeGithubClient, mux, cleanup := test.NewFakeGitHub() mux.HandleFunc( @@ -310,7 +309,7 @@ func TestVersionChecker_WriteStateFile(t *testing.T) { func TestVersionChecker_ReadFromStateFile(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -360,7 +359,7 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { func TestVersionChecker_ReadFromStateFileWithin24Hrs(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -404,7 +403,7 @@ func TestVersionChecker_ReadFromStateFileWithin24Hrs(t *testing.T) { func TestVersionChecker_ReadFromCorruptedStateFile(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := t.Context() tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() diff --git a/controllers/clustercache/cluster_accessor_test.go b/controllers/clustercache/cluster_accessor_test.go index dd9eaf64fcdb..45374bfb85e9 100644 --- a/controllers/clustercache/cluster_accessor_test.go +++ b/controllers/clustercache/cluster_accessor_test.go @@ -66,7 +66,7 @@ func 
TestConnect(t *testing.T) { Indexes: []CacheOptionsIndex{NodeProviderIDIndex}, }, }, nil) - accessor := newClusterAccessor(context.Background(), clusterKey, config) + accessor := newClusterAccessor(t.Context(), clusterKey, config) // Connect when kubeconfig Secret doesn't exist (should fail) err := accessor.Connect(ctx) @@ -164,7 +164,7 @@ func TestDisconnect(t *testing.T) { Timeout: 10 * time.Second, }, }, nil) - accessor := newClusterAccessor(context.Background(), clusterKey, config) + accessor := newClusterAccessor(t.Context(), clusterKey, config) // Connect (so we can disconnect afterward) g.Expect(accessor.Connect(ctx)).To(Succeed()) @@ -271,7 +271,7 @@ func TestHealthCheck(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - accessor := newClusterAccessor(context.Background(), clusterKey, &clusterAccessorConfig{ + accessor := newClusterAccessor(t.Context(), clusterKey, &clusterAccessorConfig{ HealthProbe: &clusterAccessorHealthProbeConfig{ Timeout: 5 * time.Second, FailureThreshold: 5, @@ -324,7 +324,7 @@ func TestWatch(t *testing.T) { Timeout: 10 * time.Second, }, }, nil) - accessor := newClusterAccessor(context.Background(), clusterKey, config) + accessor := newClusterAccessor(t.Context(), clusterKey, config) tw := &testWatcher{} wi := WatcherOptions{ diff --git a/controllers/clustercache/cluster_cache_test.go b/controllers/clustercache/cluster_cache_test.go index 7294fdf0bbe5..57602b59844f 100644 --- a/controllers/clustercache/cluster_cache_test.go +++ b/controllers/clustercache/cluster_cache_test.go @@ -77,7 +77,7 @@ func TestReconcile(t *testing.T) { client: env.GetAPIReader(), clusterAccessorConfig: accessorConfig, clusterAccessors: make(map[client.ObjectKey]*clusterAccessor), - cacheCtx: context.Background(), + cacheCtx: t.Context(), } // Add a Cluster source and start it (queue will be later used to verify the source works correctly) diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index 9d44893810e7..76f4bf91def2 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -17,7 +17,6 @@ limitations under the License. package controllers import ( - "context" "fmt" "testing" "time" @@ -230,13 +229,13 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(adoptableMachineFound).To(BeFalse()) - result, err := r.scaleUpControlPlane(context.Background(), controlPlane) + result, err := r.scaleUpControlPlane(t.Context(), controlPlane) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) // scaleUpControlPlane is never called due to health check failure and new machine is not created to scale up. controlPlaneMachines := &clusterv1.MachineList{} - g.Expect(env.GetAPIReader().List(context.Background(), controlPlaneMachines, client.InNamespace(namespace.Name))).To(Succeed()) + g.Expect(env.GetAPIReader().List(t.Context(), controlPlaneMachines, client.InNamespace(namespace.Name))).To(Succeed()) // No new machine should be created. // Note: expected length is 0 because no machine is created and hence no machine is on the API server. // Other machines are in-memory only during the test. @@ -284,12 +283,12 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. 
} controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) + result, err := r.scaleDownControlPlane(t.Context(), controlPlane, controlPlane.Machines) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) controlPlaneMachines := clusterv1.MachineList{} - g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed()) + g.Expect(fakeClient.List(t.Context(), &controlPlaneMachines)).To(Succeed()) g.Expect(controlPlaneMachines.Items).To(BeEmpty()) }) t.Run("deletes the oldest control plane Machine even if preflight checks fails", func(t *testing.T) { @@ -326,12 +325,12 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. } controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) + result, err := r.scaleDownControlPlane(t.Context(), controlPlane, controlPlane.Machines) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) controlPlaneMachines := clusterv1.MachineList{} - g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed()) + g.Expect(fakeClient.List(t.Context(), &controlPlaneMachines)).To(Succeed()) g.Expect(controlPlaneMachines.Items).To(HaveLen(2)) }) @@ -364,12 +363,12 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. } controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) + result, err := r.scaleDownControlPlane(t.Context(), controlPlane, controlPlane.Machines) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) controlPlaneMachines := clusterv1.MachineList{} - g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed()) + g.Expect(fakeClient.List(t.Context(), &controlPlaneMachines)).To(Succeed()) g.Expect(controlPlaneMachines.Items).To(HaveLen(3)) }) } @@ -709,7 +708,7 @@ func TestPreflightChecks(t *testing.T) { KCP: tt.kcp, Machines: collections.FromMachines(tt.machines...), } - result, err := r.preflightChecks(context.TODO(), controlPlane) + result, err := r.preflightChecks(t.Context(), controlPlane) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(tt.expectResult)) g.Expect(controlPlane.PreflightCheckResults).To(Equal(tt.expectPreflight)) diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go index 6d3f348bc22e..65faf0453bab 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade_test.go +++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package controllers import ( - "context" "fmt" "testing" "time" @@ -146,11 +145,11 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { } controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err = r.reconcile(context.Background(), controlPlane) + result, err = r.reconcile(t.Context(), controlPlane) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) g.Eventually(func(g Gomega) { - g.Expect(env.List(context.Background(), bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(env.List(t.Context(), bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(bothMachines.Items).To(HaveLen(2)) }, timeout).Should(Succeed()) diff --git a/controlplane/kubeadm/internal/webhooks/scale_test.go b/controlplane/kubeadm/internal/webhooks/scale_test.go index b2067db4b1d4..72d082711bbc 100644 --- a/controlplane/kubeadm/internal/webhooks/scale_test.go +++ b/controlplane/kubeadm/internal/webhooks/scale_test.go @@ -17,7 +17,6 @@ limitations under the License. package webhooks import ( - "context" "testing" "time" @@ -201,7 +200,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { decoder: admission.NewDecoder(scheme), } - resp := scaleHandler.Handle(context.Background(), tt.admissionRequest) + resp := scaleHandler.Handle(t.Context(), tt.admissionRequest) g.Expect(resp.Allowed).Should(Equal(tt.expectRespAllowed)) g.Expect(resp.Result.Message).Should(Equal(tt.expectRespMessage)) }) diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go index d44911678417..5471e07fdd92 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go @@ -740,7 +740,7 @@ func TestReconcileEtcdMembersAndControlPlaneNodes(t *testing.T) { Client: env.Client, etcdClientGenerator: tt.etcdClientGenerator, } - ctx := context.TODO() + ctx := t.Context() _, err := w.ReconcileEtcdMembersAndControlPlaneNodes(ctx, tt.members, tt.nodes) if tt.expectErr { g.Expect(err).To(HaveOccurred()) diff --git a/exp/internal/webhooks/machinepool_test.go b/exp/internal/webhooks/machinepool_test.go index d9096c737a5e..299c855a27e9 100644 --- a/exp/internal/webhooks/machinepool_test.go +++ b/exp/internal/webhooks/machinepool_test.go @@ -17,7 +17,6 @@ limitations under the License. package webhooks import ( - "context" "strings" "testing" @@ -213,7 +212,7 @@ func TestCalculateMachinePoolReplicas(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - replicas, err := calculateMachinePoolReplicas(context.Background(), tt.oldMP, tt.newMP, false) + replicas, err := calculateMachinePoolReplicas(t.Context(), tt.oldMP, tt.newMP, false) if tt.expectErr { g.Expect(err).To(HaveOccurred()) diff --git a/exp/ipam/internal/webhooks/ipaddress_test.go b/exp/ipam/internal/webhooks/ipaddress_test.go index a5175f310f88..edd89b47a39e 100644 --- a/exp/ipam/internal/webhooks/ipaddress_test.go +++ b/exp/ipam/internal/webhooks/ipaddress_test.go @@ -17,7 +17,6 @@ limitations under the License. package webhooks import ( - "context" "testing" . 
"github.com/onsi/gomega" @@ -166,9 +165,9 @@ func TestIPAddressValidateCreate(t *testing.T) { Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.extraObjs...).Build(), } if tt.expectErr { - g.Expect(wh.validate(context.Background(), &tt.ip)).NotTo(Succeed()) + g.Expect(wh.validate(t.Context(), &tt.ip)).NotTo(Succeed()) } else { - g.Expect(wh.validate(context.Background(), &tt.ip)).To(Succeed()) + g.Expect(wh.validate(t.Context(), &tt.ip)).To(Succeed()) } }) } @@ -224,7 +223,7 @@ func TestIPAddressValidateUpdate(t *testing.T) { wh := IPAddress{ Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.extraObjs...).Build(), } - warnings, err := wh.ValidateUpdate(context.Background(), &tt.oldIP, &tt.newIP) + warnings, err := wh.ValidateUpdate(t.Context(), &tt.oldIP, &tt.newIP) if tt.expectErr { g.Expect(err).To(HaveOccurred()) } else { diff --git a/exp/ipam/internal/webhooks/ipaddressclaim_test.go b/exp/ipam/internal/webhooks/ipaddressclaim_test.go index f39f66e1b60c..68f45050b2b3 100644 --- a/exp/ipam/internal/webhooks/ipaddressclaim_test.go +++ b/exp/ipam/internal/webhooks/ipaddressclaim_test.go @@ -17,7 +17,6 @@ limitations under the License. package webhooks import ( - "context" "testing" . "github.com/onsi/gomega" @@ -66,7 +65,7 @@ func TestIPAddressClaimValidateCreate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) wh := IPAddressClaim{} - warnings, err := wh.ValidateCreate(context.Background(), &tt.claim) + warnings, err := wh.ValidateCreate(t.Context(), &tt.claim) if tt.expectErr { g.Expect(err).To(HaveOccurred()) } else { @@ -117,7 +116,7 @@ func TestIPAddressClaimValidateUpdate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) wh := IPAddressClaim{} - warnings, err := wh.ValidateUpdate(context.Background(), &tt.oldClaim, &tt.newClaim) + warnings, err := wh.ValidateUpdate(t.Context(), &tt.oldClaim, &tt.newClaim) if tt.expectErr { g.Expect(err).To(HaveOccurred()) } else { diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go index 65a14e107b63..df4895095942 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. package controllers import ( - "context" "crypto/tls" "encoding/json" "net/http" @@ -367,7 +366,7 @@ func Test_reconcileCABundle(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := reconcileCABundle(context.TODO(), tt.client, tt.config) + err := reconcileCABundle(t.Context(), tt.client, tt.config) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(tt.config.Spec.ClientConfig.CABundle).To(Equal(tt.wantCABundle)) diff --git a/exp/runtime/topologymutation/walker_test.go b/exp/runtime/topologymutation/walker_test.go index fad450a60ef7..18165e526e8a 100644 --- a/exp/runtime/topologymutation/walker_test.go +++ b/exp/runtime/topologymutation/walker_test.go @@ -222,7 +222,7 @@ func Test_WalkTemplates(t *testing.T) { response := &runtimehooksv1.GeneratePatchesResponse{} request := &runtimehooksv1.GeneratePatchesRequest{Variables: tt.globalVariables, Items: tt.requestItems} - WalkTemplates(context.Background(), decoder, request, response, mutatingFunc, tt.options...) + WalkTemplates(t.Context(), decoder, request, response, mutatingFunc, tt.options...) 
g.Expect(response.Status).To(Equal(tt.expectedResponse.Status)) g.Expect(response.Message).To(ContainSubstring(tt.expectedResponse.Message)) diff --git a/exp/topology/scope/state_test.go b/exp/topology/scope/state_test.go index 6794f2ab07b5..34131c6e1889 100644 --- a/exp/topology/scope/state_test.go +++ b/exp/topology/scope/state_test.go @@ -17,7 +17,6 @@ limitations under the License. package scope import ( - "context" "testing" . "github.com/onsi/gomega" @@ -35,7 +34,7 @@ func TestMDUpgrading(t *testing.T) { scheme := runtime.NewScheme() g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - ctx := context.Background() + ctx := t.Context() t.Run("should return the names of the upgrading MachineDeployments", func(*testing.T) { stableMD := builder.MachineDeployment("ns", "stableMD"). @@ -77,7 +76,7 @@ func TestMPUpgrading(t *testing.T) { g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - ctx := context.Background() + ctx := t.Context() t.Run("should return the names of the upgrading MachinePools", func(*testing.T) { stableMP := builder.MachinePool("ns", "stableMP"). diff --git a/internal/controllers/clusterclass/clusterclass_controller_test.go b/internal/controllers/clusterclass/clusterclass_controller_test.go index 076b61b94865..36231f7faab8 100644 --- a/internal/controllers/clusterclass/clusterclass_controller_test.go +++ b/internal/controllers/clusterclass/clusterclass_controller_test.go @@ -1249,7 +1249,7 @@ func TestReconciler_extensionConfigToClusterClass(t *testing.T) { {NamespacedName: types.NamespacedName{Namespace: onePatchClusterClass.Namespace, Name: onePatchClusterClass.Name}}, {NamespacedName: types.NamespacedName{Namespace: twoPatchClusterClass.Namespace, Name: twoPatchClusterClass.Name}}, } - if got := r.extensionConfigToClusterClass(context.Background(), firstExtConfig); !cmp.Equal(got, firstExtConfigExpected) { + if got := r.extensionConfigToClusterClass(t.Context(), firstExtConfig); !cmp.Equal(got, firstExtConfigExpected) { t.Errorf("extensionConfigToClusterClass() = %v, want %v", got, firstExtConfigExpected) } @@ -1257,7 +1257,7 @@ func TestReconciler_extensionConfigToClusterClass(t *testing.T) { secondExtConfigExpected := []reconcile.Request{ {NamespacedName: types.NamespacedName{Namespace: twoPatchClusterClass.Namespace, Name: twoPatchClusterClass.Name}}, } - if got := r.extensionConfigToClusterClass(context.Background(), secondExtConfig); !cmp.Equal(got, secondExtConfigExpected) { + if got := r.extensionConfigToClusterClass(t.Context(), secondExtConfig); !cmp.Equal(got, secondExtConfigExpected) { t.Errorf("extensionConfigToClusterClass() = %v, want %v", got, secondExtConfigExpected) } }) diff --git a/internal/controllers/clusterresourceset/clusterresourceset_helpers_test.go b/internal/controllers/clusterresourceset/clusterresourceset_helpers_test.go index 0d2e5e102512..fe7b7fabc549 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_helpers_test.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_helpers_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package clusterresourceset import ( - "context" "testing" "time" @@ -105,7 +104,7 @@ func TestGetorCreateClusterResourceSetBinding(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - clusterResourceSetBinding, err := r.getOrCreateClusterResourceSetBinding(context.TODO(), tt.cluster, &addonsv1.ClusterResourceSet{}) + clusterResourceSetBinding, err := r.getOrCreateClusterResourceSetBinding(t.Context(), tt.cluster, &addonsv1.ClusterResourceSet{}) gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(clusterResourceSetBinding.Spec.Bindings).To(HaveLen(tt.numOfClusterResourceSets)) @@ -151,7 +150,7 @@ func TestGetSecretFromNamespacedName(t *testing.T) { WithObjects(existingSecret). Build() - got, err := getSecret(context.TODO(), c, tt.secretName) + got, err := getSecret(t.Context(), c, tt.secretName) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) @@ -208,7 +207,7 @@ func TestGetConfigMapFromNamespacedName(t *testing.T) { WithObjects(existingConfigMap). Build() - got, err := getConfigMap(context.TODO(), c, tt.configMapName) + got, err := getConfigMap(t.Context(), c, tt.configMapName) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) @@ -261,7 +260,7 @@ func TestEnsureKubernetesServiceCreated(t *testing.T) { WithObjects(tt.existingObjs...). Build() - err := ensureKubernetesServiceCreated(context.TODO(), c) + err := ensureKubernetesServiceCreated(t.Context(), c) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) diff --git a/internal/controllers/clusterresourceset/clusterresourceset_scope_test.go b/internal/controllers/clusterresourceset/clusterresourceset_scope_test.go index 03442ca2bf86..a9f2fe2727b7 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_scope_test.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_scope_test.go @@ -17,7 +17,6 @@ limitations under the License. package clusterresourceset import ( - "context" "testing" . 
"github.com/onsi/gomega" @@ -237,7 +236,7 @@ func TestReconcileApplyOnceScopeApplyObj(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - ctx := context.Background() + ctx := t.Context() client := fake.NewClientBuilder().WithObjects(tt.existingObjs...).Build() scope := &reconcileApplyOnceScope{} err := scope.applyObj(ctx, client, tt.obj) diff --git a/internal/controllers/machine/drain/drain_test.go b/internal/controllers/machine/drain/drain_test.go index 7f745e993c65..438598e4d836 100644 --- a/internal/controllers/machine/drain/drain_test.go +++ b/internal/controllers/machine/drain/drain_test.go @@ -77,10 +77,10 @@ func TestRunCordonOrUncordon(t *testing.T) { RemoteClient: fakeClient, } - g.Expect(drainer.CordonNode(context.Background(), tt.node)).To(Succeed()) + g.Expect(drainer.CordonNode(t.Context(), tt.node)).To(Succeed()) gotNode := tt.node.DeepCopy() - g.Expect(fakeClient.Get(context.Background(), client.ObjectKeyFromObject(gotNode), gotNode)).To(Succeed()) + g.Expect(fakeClient.Get(t.Context(), client.ObjectKeyFromObject(gotNode), gotNode)).To(Succeed()) g.Expect(gotNode.Spec.Unschedulable).To(BeTrue()) }) } @@ -888,7 +888,7 @@ func TestGetPodsForEviction(t *testing.T) { SkipWaitForDeleteTimeoutSeconds: 10, } - gotPodDeleteList, err := drainer.GetPodsForEviction(context.Background(), cluster, machine, "node-1") + gotPodDeleteList, err := drainer.GetPodsForEviction(t.Context(), cluster, machine, "node-1") if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(BeComparableTo(tt.wantErr)) @@ -1103,7 +1103,7 @@ func Test_getMatchingMachineDrainRules(t *testing.T) { Client: fakeClient, } - gotMachineDrainRules, err := drainer.getMatchingMachineDrainRules(context.Background(), cluster, machine) + gotMachineDrainRules, err := drainer.getMatchingMachineDrainRules(t.Context(), cluster, machine) if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) @@ -1705,7 +1705,7 @@ func TestEvictPods(t *testing.T) { RemoteClient: fakeClient, } - gotEvictionResult := drainer.EvictPods(context.Background(), tt.podDeleteList) + gotEvictionResult := drainer.EvictPods(t.Context(), tt.podDeleteList) // Cleanup for easier diff. for i, pod := range gotEvictionResult.PodsDeletionTimestampSet { gotEvictionResult.PodsDeletionTimestampSet[i] = &corev1.Pod{ diff --git a/internal/controllers/machine/drain/filters_test.go b/internal/controllers/machine/drain/filters_test.go index 6532485b278c..d660b64ade19 100644 --- a/internal/controllers/machine/drain/filters_test.go +++ b/internal/controllers/machine/drain/filters_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package drain import ( - "context" "testing" "time" @@ -67,7 +66,7 @@ func TestSkipDeletedFilter(t *testing.T) { pod.SetDeletionTimestamp(dTime) } - podDeleteStatus := h.skipDeletedFilter(context.Background(), &pod) + podDeleteStatus := h.skipDeletedFilter(t.Context(), &pod) if podDeleteStatus.DrainBehavior != tc.expectedDrainBehavior { t.Errorf("test %v: unexpected podDeleteStatus.DrainBehavior; actual %v; expected %v", i, podDeleteStatus.DrainBehavior, tc.expectedDrainBehavior) } diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 9f72b1d29431..c9011d275e23 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -3474,7 +3474,7 @@ func TestNodeDeletion(t *testing.T) { infraMachineIsNotFound: true, bootstrapConfigIsNotFound: true, } - _, err := r.reconcileDelete(context.Background(), s) + _, err := r.reconcileDelete(t.Context(), s) if tc.resultErr { g.Expect(err).To(HaveOccurred()) @@ -3482,7 +3482,7 @@ func TestNodeDeletion(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) if tc.expectNodeDeletion { n := &corev1.Node{} - g.Expect(fakeClient.Get(context.Background(), client.ObjectKeyFromObject(node), n)).NotTo(Succeed()) + g.Expect(fakeClient.Get(t.Context(), client.ObjectKeyFromObject(node), n)).NotTo(Succeed()) } } g.Expect(s.deletingReason).To(Equal(tc.expectDeletingReason)) @@ -3603,7 +3603,7 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) { infraMachineIsNotFound: true, bootstrapConfigIsNotFound: true, } - _, err := r.reconcileDelete(context.Background(), s) + _, err := r.reconcileDelete(t.Context(), s) if tc.resultErr { g.Expect(err).To(HaveOccurred()) @@ -3611,7 +3611,7 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) if tc.expectNodeDeletion { n := &corev1.Node{} - g.Expect(apierrors.IsNotFound(fakeClient.Get(context.Background(), client.ObjectKeyFromObject(node), n))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(fakeClient.Get(t.Context(), client.ObjectKeyFromObject(node), n))).To(BeTrue()) } } g.Expect(s.deletingReason).To(Equal(tc.expectDeletingReason)) diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index 1ecd8efdb83f..6fdfd3eff853 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package machinedeployment import ( - "context" "fmt" "strings" "testing" @@ -359,7 +358,7 @@ func TestScaleMachineSet(t *testing.T) { recorder: record.NewFakeRecorder(32), } - err := r.scaleMachineSet(context.Background(), tc.machineSet, tc.newScale, tc.machineDeployment) + err := r.scaleMachineSet(t.Context(), tc.machineSet, tc.newScale, tc.machineDeployment) if tc.error != nil { g.Expect(err.Error()).To(BeEquivalentTo(tc.error.Error())) return diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 19eaad11c2db..bdeddd2ff3e3 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -2825,11 +2825,11 @@ func TestPatchTargets(t *testing.T) { } // Target with wrong patch helper will fail but the other one will be patched. - g.Expect(r.patchUnhealthyTargets(context.TODO(), logr.New(log.NullLogSink{}), []healthCheckTarget{target1, target3}, defaultCluster, mhc)).ToNot(BeEmpty()) + g.Expect(r.patchUnhealthyTargets(t.Context(), logr.New(log.NullLogSink{}), []healthCheckTarget{target1, target3}, defaultCluster, mhc)).ToNot(BeEmpty()) g.Expect(cl.Get(ctx, client.ObjectKey{Name: machine2.Name, Namespace: machine2.Namespace}, machine2)).ToNot(HaveOccurred()) g.Expect(v1beta1conditions.Get(machine2, clusterv1.MachineOwnerRemediatedV1Beta1Condition).Status).To(Equal(corev1.ConditionFalse)) g.Expect(conditions.Get(machine2, clusterv1.MachineOwnerRemediatedCondition).Status).To(Equal(metav1.ConditionFalse)) // Target with wrong patch helper will fail but the other one will be patched. - g.Expect(r.patchHealthyTargets(context.TODO(), logr.New(log.NullLogSink{}), []healthCheckTarget{target1, target3}, mhc)).ToNot(BeEmpty()) + g.Expect(r.patchHealthyTargets(t.Context(), logr.New(log.NullLogSink{}), []healthCheckTarget{target1, target3}, mhc)).ToNot(BeEmpty()) } diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 5dc3c033e075..1d9af583c9f5 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -2344,7 +2344,7 @@ func TestMachineSetReconciler_syncReplicas_WithErrors(t *testing.T) { bootstrapTmpl.SetAPIVersion(clusterv1.GroupVersionBootstrap.String()) bootstrapTmpl.SetName("ms-template") bootstrapTmpl.SetNamespace(metav1.NamespaceDefault) - g.Expect(r.Client.Create(context.TODO(), bootstrapTmpl)).To(Succeed()) + g.Expect(r.Client.Create(t.Context(), bootstrapTmpl)).To(Succeed()) // Create infrastructure template resource. 
infraResource := map[string]interface{}{ @@ -2370,7 +2370,7 @@ func TestMachineSetReconciler_syncReplicas_WithErrors(t *testing.T) { infraTmpl.SetAPIVersion(clusterv1.GroupVersionInfrastructure.String()) infraTmpl.SetName("ms-template") infraTmpl.SetNamespace(metav1.NamespaceDefault) - g.Expect(r.Client.Create(context.TODO(), infraTmpl)).To(Succeed()) + g.Expect(r.Client.Create(t.Context(), infraTmpl)).To(Succeed()) s := &scope{ cluster: testCluster, diff --git a/internal/controllers/topology/cluster/patches/engine_test.go b/internal/controllers/topology/cluster/patches/engine_test.go index 4b8fbf5bf091..08954c2be7bd 100644 --- a/internal/controllers/topology/cluster/patches/engine_test.go +++ b/internal/controllers/topology/cluster/patches/engine_test.go @@ -17,7 +17,6 @@ limitations under the License. package patches import ( - "context" "encoding/json" "fmt" "strings" @@ -1005,7 +1004,7 @@ func TestApply(t *testing.T) { } // Apply patches. - if err := patchEngine.Apply(context.Background(), blueprint, desired); err != nil { + if err := patchEngine.Apply(t.Context(), blueprint, desired); err != nil { if !tt.wantErr { t.Fatal(err) } diff --git a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go index c93f26328ca8..e071192543b9 100644 --- a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go @@ -36,7 +36,7 @@ import ( func TestExternalPatchGenerator_Generate(t *testing.T) { utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true) - ctx := context.Background() + ctx := t.Context() tests := []struct { name string runtimeClient *fakeRuntimeClient diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go index 1fd7b0844edc..b4fa7891b69b 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go @@ -18,7 +18,6 @@ package inline import ( "bytes" - "context" "encoding/json" "testing" @@ -415,7 +414,7 @@ func TestGenerate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := NewGenerator(tt.patch).Generate(context.Background(), &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: "default"}}, tt.req) + got, err := NewGenerator(tt.patch).Generate(t.Context(), &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: "default"}}, tt.req) g.Expect(got).To(BeComparableTo(tt.want)) g.Expect(err).ToNot(HaveOccurred()) diff --git a/internal/goproxy/goproxy_test.go b/internal/goproxy/goproxy_test.go index 408684681690..4203a71cb47b 100644 --- a/internal/goproxy/goproxy_test.go +++ b/internal/goproxy/goproxy_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package goproxy import ( - "context" "fmt" "net/http" "testing" @@ -92,7 +91,7 @@ func TestClient_GetVersions(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() g := NewWithT(t) got, err := clientGoproxy.GetVersions(ctx, tt.gomodulePath) diff --git a/internal/hooks/tracking_test.go b/internal/hooks/tracking_test.go index 2538459f9cb2..1f1205f0d44b 100644 --- a/internal/hooks/tracking_test.go +++ b/internal/hooks/tracking_test.go @@ -17,7 +17,6 @@ limitations under the License. package hooks import ( - "context" "testing" . "github.com/onsi/gomega" @@ -167,7 +166,7 @@ func TestMarkAsPending(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) fakeClient := fake.NewClientBuilder().WithObjects(tt.obj).Build() - ctx := context.Background() + ctx := t.Context() g.Expect(MarkAsPending(ctx, fakeClient, tt.obj, tt.hook)).To(Succeed()) annotations := tt.obj.GetAnnotations() g.Expect(annotations[runtimev1.PendingHooksAnnotation]).To(ContainSubstring(runtimecatalog.HookName(tt.hook))) @@ -242,7 +241,7 @@ func TestMarkAsDone(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) fakeClient := fake.NewClientBuilder().WithObjects(tt.obj).Build() - ctx := context.Background() + ctx := t.Context() g.Expect(MarkAsDone(ctx, fakeClient, tt.obj, tt.hook)).To(Succeed()) annotations := tt.obj.GetAnnotations() g.Expect(annotations[runtimev1.PendingHooksAnnotation]).NotTo(ContainSubstring(runtimecatalog.HookName(tt.hook))) @@ -322,7 +321,7 @@ func TestMarkAsOkToDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) fakeClient := fake.NewClientBuilder().WithObjects(tt.obj).Build() - ctx := context.Background() + ctx := t.Context() g.Expect(MarkAsOkToDelete(ctx, fakeClient, tt.obj)).To(Succeed()) annotations := tt.obj.GetAnnotations() g.Expect(annotations).To(HaveKey(runtimev1.OkToDeleteAnnotation)) diff --git a/internal/runtime/client/client_test.go b/internal/runtime/client/client_test.go index a592818af9aa..7d417c6094fe 100644 --- a/internal/runtime/client/client_test.go +++ b/internal/runtime/client/client_test.go @@ -17,7 +17,6 @@ limitations under the License. package client import ( - "context" "crypto/tls" "encoding/json" "fmt" @@ -200,7 +199,7 @@ func TestClient_httpCall(t *testing.T) { tt.opts.config.CABundle = testcerts.CACert } - err := httpCall(context.TODO(), tt.request, tt.response, tt.opts) + err := httpCall(t.Context(), tt.request, tt.response, tt.opts) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -800,7 +799,7 @@ func TestClient_CallExtension(t *testing.T) { }, } // Call once without caching. - err := c.CallExtension(context.Background(), tt.args.hook, obj, tt.args.name, tt.args.request, tt.args.response) + err := c.CallExtension(t.Context(), tt.args.hook, obj, tt.args.name, tt.args.request, tt.args.response) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -810,7 +809,7 @@ func TestClient_CallExtension(t *testing.T) { // Call again with caching. 
 			serverCallCount = 0
 			cache := cache.New[runtimeclient.CallExtensionCacheEntry](cache.DefaultTTL)
-			err = c.CallExtension(context.Background(), tt.args.hook, obj, tt.args.name, tt.args.request, tt.args.response,
+			err = c.CallExtension(t.Context(), tt.args.hook, obj, tt.args.name, tt.args.request, tt.args.response,
 				runtimeclient.WithCaching{Cache: cache, CacheKeyFunc: cacheKeyFunc})
 			if tt.wantErr {
 				g.Expect(err).To(HaveOccurred())
@@ -825,7 +824,7 @@ func TestClient_CallExtension(t *testing.T) {
 			g.Expect(isCached).To(BeTrue())
 			g.Expect(cacheEntry).ToNot(BeNil())
-			err = c.CallExtension(context.Background(), tt.args.hook, obj, tt.args.name, tt.args.request, tt.args.response,
+			err = c.CallExtension(t.Context(), tt.args.hook, obj, tt.args.name, tt.args.request, tt.args.response,
 				runtimeclient.WithCaching{Cache: cache, CacheKeyFunc: cacheKeyFunc})
 			// When we expect the response to be cached we always expect no errors.
 			g.Expect(err).ToNot(HaveOccurred())
@@ -1124,7 +1123,7 @@ func TestClient_CallAllExtensions(t *testing.T) {
 					Namespace: "foo",
 				},
 			}
-			err := c.CallAllExtensions(context.Background(), tt.args.hook, obj, tt.args.request, tt.args.response)
+			err := c.CallAllExtensions(t.Context(), tt.args.hook, obj, tt.args.request, tt.args.response)
 			if tt.wantErr {
 				g.Expect(err).To(HaveOccurred())
@@ -1237,7 +1236,7 @@ func Test_client_matchNamespace(t *testing.T) {
 					WithObjects(tt.existingNamespaces...).
 					Build(),
 			}
-			got, err := c.matchNamespace(context.Background(), tt.selector, tt.namespace)
+			got, err := c.matchNamespace(t.Context(), tt.selector, tt.namespace)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("matchNamespace() error = %v, wantErr %v", err, tt.wantErr)
 				return
diff --git a/internal/runtime/test/v1alpha1/conversion_test.go b/internal/runtime/test/v1alpha1/conversion_test.go
index 7ca041c0d76f..032a5fa2867e 100644
--- a/internal/runtime/test/v1alpha1/conversion_test.go
+++ b/internal/runtime/test/v1alpha1/conversion_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package v1alpha1
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -41,7 +40,7 @@ func TestConversion(t *testing.T) {
 		}}}
 		requestLocal := &FakeRequest{}
-		g.Expect(c.Convert(request, requestLocal, context.Background())).To(Succeed())
+		g.Expect(c.Convert(request, requestLocal, t.Context())).To(Succeed())
 		g.Expect(requestLocal.Cluster.GetName()).To(Equal(request.Cluster.Name))
 	})
@@ -51,7 +50,7 @@ func TestConversion(t *testing.T) {
 			Second: "foo",
 		}
 		response := &v1alpha2.FakeResponse{}
-		g.Expect(c.Convert(responseLocal, response, context.Background())).To(Succeed())
+		g.Expect(c.Convert(responseLocal, response, t.Context())).To(Succeed())
 		g.Expect(response.First).To(Equal(responseLocal.First))
 		g.Expect(response.Second).To(Equal(responseLocal.Second))
diff --git a/internal/topology/check/upgrade_test.go b/internal/topology/check/upgrade_test.go
index 148f510496ea..0d836371d844 100644
--- a/internal/topology/check/upgrade_test.go
+++ b/internal/topology/check/upgrade_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package check
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -96,7 +95,7 @@ func TestIsMachineDeploymentUpgrading(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
-			ctx := context.Background()
+			ctx := t.Context()
 			objs := []client.Object{}
 			objs = append(objs, tt.md)
@@ -204,7 +203,7 @@ func TestIsMachinePoolUpgrading(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
-			ctx := context.Background()
+			ctx := t.Context()
 			objs := []client.Object{}
 			for _, m := range tt.nodes {
diff --git a/internal/util/ssa/managedfields_test.go b/internal/util/ssa/managedfields_test.go
index 0b4f432867d8..d5418ff05044 100644
--- a/internal/util/ssa/managedfields_test.go
+++ b/internal/util/ssa/managedfields_test.go
@@ -18,7 +18,6 @@ limitations under the License.
 package ssa
 import (
-	"context"
 	"encoding/json"
 	"testing"
@@ -32,7 +31,7 @@ import (
 )
 func TestDropManagedFields(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	ssaManager := "ssa-manager"
@@ -161,7 +160,7 @@ func TestDropManagedFields(t *testing.T) {
 }
 func TestCleanUpManagedFieldsForSSAAdoption(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	ssaManager := "ssa-manager"
diff --git a/internal/webhooks/machinedeployment_test.go b/internal/webhooks/machinedeployment_test.go
index 87c6593c5737..fd0eff268307 100644
--- a/internal/webhooks/machinedeployment_test.go
+++ b/internal/webhooks/machinedeployment_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package webhooks
 import (
-	"context"
 	"strings"
 	"testing"
@@ -272,7 +271,7 @@ func TestCalculateMachineDeploymentReplicas(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
-			replicas, err := calculateMachineDeploymentReplicas(context.Background(), tt.oldMD, tt.newMD, false)
+			replicas, err := calculateMachineDeploymentReplicas(t.Context(), tt.oldMD, tt.newMD, false)
 			if tt.expectErr {
 				g.Expect(err).To(HaveOccurred())
diff --git a/internal/webhooks/machineset_test.go b/internal/webhooks/machineset_test.go
index 8f545b16db0f..ece7414bb0ae 100644
--- a/internal/webhooks/machineset_test.go
+++ b/internal/webhooks/machineset_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package webhooks
 import (
-	"context"
 	"strings"
 	"testing"
@@ -213,7 +212,7 @@ func TestCalculateMachineSetReplicas(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
-			replicas, err := calculateMachineSetReplicas(context.Background(), tt.oldMS, tt.newMS, false)
+			replicas, err := calculateMachineSetReplicas(t.Context(), tt.oldMS, tt.newMS, false)
 			if tt.expectErr {
 				g.Expect(err).To(HaveOccurred())
diff --git a/internal/webhooks/runtime/extensionconfig_webhook_test.go b/internal/webhooks/runtime/extensionconfig_webhook_test.go
index 6d625eaa2346..ca19bb4d838f 100644
--- a/internal/webhooks/runtime/extensionconfig_webhook_test.go
+++ b/internal/webhooks/runtime/extensionconfig_webhook_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package runtime
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -95,7 +94,7 @@ func TestExtensionConfigValidationFeatureGated(t *testing.T) {
 			utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, tt.featureGate)
 			webhook := ExtensionConfig{}
 			g := NewWithT(t)
-			warnings, err := webhook.validate(context.TODO(), tt.old, tt.new)
+			warnings, err := webhook.validate(t.Context(), tt.old, tt.new)
 			if tt.expectErr {
 				g.Expect(err).To(HaveOccurred())
 				g.Expect(warnings).To(BeEmpty())
diff --git a/test/extension/handlers/topologymutation/handler_test.go b/test/extension/handlers/topologymutation/handler_test.go
index c6a1fb12474f..ca9d26bc3995 100644
--- a/test/extension/handlers/topologymutation/handler_test.go
+++ b/test/extension/handlers/topologymutation/handler_test.go
@@ -18,7 +18,6 @@ package topologymutation
 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"testing"
@@ -86,8 +85,8 @@ func Test_patchDockerClusterTemplate(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(*testing.T) {
-			err := patchDockerClusterTemplate(context.Background(), tt.template, tt.variables)
+		t.Run(tt.name, func(t *testing.T) {
+			err := patchDockerClusterTemplate(t.Context(), tt.template, tt.variables)
 			if tt.expectedErr {
 				g.Expect(err).To(HaveOccurred())
 			} else {
@@ -187,8 +186,8 @@ func Test_patchKubeadmControlPlaneTemplate(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(*testing.T) {
-			err := patchKubeadmControlPlaneTemplate(context.Background(), tt.template, tt.variables)
+		t.Run(tt.name, func(t *testing.T) {
+			err := patchKubeadmControlPlaneTemplate(t.Context(), tt.template, tt.variables)
 			if tt.expectedErr {
 				g.Expect(err).To(HaveOccurred())
 			} else {
@@ -348,8 +347,8 @@ func Test_patchKubeadmConfigTemplate(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(*testing.T) {
-			err := patchKubeadmConfigTemplate(context.Background(), tt.template, tt.variables)
+		t.Run(tt.name, func(t *testing.T) {
+			err := patchKubeadmConfigTemplate(t.Context(), tt.template, tt.variables)
 			if tt.expectedErr {
 				g.Expect(err).To(HaveOccurred())
 			} else {
@@ -419,8 +418,8 @@ func Test_patchDockerMachineTemplate(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(*testing.T) {
-			err := patchDockerMachineTemplate(context.Background(), tt.template, tt.variables)
+		t.Run(tt.name, func(t *testing.T) {
+			err := patchDockerMachineTemplate(t.Context(), tt.template, tt.variables)
 			if tt.expectedErr {
 				g.Expect(err).To(HaveOccurred())
 			} else {
@@ -476,8 +475,8 @@ func Test_patchDockerMachinePoolTemplate(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(*testing.T) {
-			err := patchDockerMachinePoolTemplate(context.Background(), tt.template, tt.variables)
+		t.Run(tt.name, func(t *testing.T) {
+			err := patchDockerMachinePoolTemplate(t.Context(), tt.template, tt.variables)
 			if tt.expectedErr {
 				g.Expect(err).To(HaveOccurred())
 			} else {
@@ -597,10 +596,10 @@ func TestHandler_GeneratePatches(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(*testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			response := &runtimehooksv1.GeneratePatchesResponse{}
 			request := &runtimehooksv1.GeneratePatchesRequest{Items: tt.requestItems}
-			h.GeneratePatches(context.Background(), request, response)
+			h.GeneratePatches(t.Context(), request, response)
 			// Expect all response fields to be as expected. responseItems are ignored here and tested below.
 			// Ignore the message to not compare error strings.
diff --git a/test/framework/clusterctl/e2e_config_test.go b/test/framework/clusterctl/e2e_config_test.go
index ba93d51cacbf..cca2490e39b4 100644
--- a/test/framework/clusterctl/e2e_config_test.go
+++ b/test/framework/clusterctl/e2e_config_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package clusterctl
 import (
-	"context"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -121,7 +120,7 @@ func Test_resolveReleaseMarker(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
-			got, err := resolveReleaseMarker(context.Background(), tt.releaseMarker, clientGoproxy, toMetadataURL)
+			got, err := resolveReleaseMarker(t.Context(), tt.releaseMarker, clientGoproxy, toMetadataURL)
 			if tt.wantErr {
 				g.Expect(err).To(HaveOccurred())
 				return
diff --git a/test/infrastructure/container/interface_test.go b/test/infrastructure/container/interface_test.go
index 55d660f3f89d..2af4424846f1 100644
--- a/test/infrastructure/container/interface_test.go
+++ b/test/infrastructure/container/interface_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package container
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -46,7 +45,7 @@ func TestFilterBuildKeyNameValue(t *testing.T) {
 func TestFakeContext(t *testing.T) {
 	g := NewWithT(t)
 	fake := FakeRuntime{}
-	ctx := RuntimeInto(context.Background(), &fake)
+	ctx := RuntimeInto(t.Context(), &fake)
 	rtc, err := RuntimeFrom(ctx)
 	g.Expect(err).ShouldNot(HaveOccurred())
@@ -58,7 +57,7 @@ func TestFakeContext(t *testing.T) {
 func TestDockerContext(t *testing.T) {
 	g := NewWithT(t)
 	docker := dockerRuntime{}
-	ctx := RuntimeInto(context.Background(), &docker)
+	ctx := RuntimeInto(t.Context(), &docker)
 	rtc, err := RuntimeFrom(ctx)
 	g.Expect(err).ShouldNot(HaveOccurred())
@@ -69,7 +68,7 @@ func TestDockerContext(t *testing.T) {
 func TestInvalidContext(t *testing.T) {
 	g := NewWithT(t)
-	_, err := RuntimeFrom(context.Background())
+	_, err := RuntimeFrom(t.Context())
 	g.Expect(err).Should(HaveOccurred())
 }
diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller_test.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller_test.go
index b10d3856012c..e7b05839ffac 100644
--- a/test/infrastructure/docker/internal/controllers/dockermachine_controller_test.go
+++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package controllers
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -57,7 +56,7 @@ func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) {
 	r := DockerMachineReconciler{
 		Client: c,
 	}
-	out := r.dockerClusterToDockerMachines(context.Background(), dockerCluster)
+	out := r.dockerClusterToDockerMachines(t.Context(), dockerCluster)
 	machineNames := make([]string, len(out))
 	for i := range out {
 		machineNames[i] = out[i].Name
diff --git a/test/infrastructure/docker/internal/docker/manager_test.go b/test/infrastructure/docker/internal/docker/manager_test.go
index 41215b9855ca..16a3881d3c04 100644
--- a/test/infrastructure/docker/internal/docker/manager_test.go
+++ b/test/infrastructure/docker/internal/docker/manager_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package docker
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -31,7 +30,7 @@ import (
 func TestCreateNode(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	containerRuntime.ResetRunContainerCallLogs()
 	portMappingsWithAPIServer := []v1alpha4.PortMapping{
@@ -74,7 +73,7 @@ func TestCreateNode(t *testing.T) {
 func TestCreateControlPlaneNode(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	containerRuntime.ResetRunContainerCallLogs()
 	containerRuntime.ResetRunContainerCallLogs()
@@ -97,7 +96,7 @@ func TestCreateControlPlaneNode(t *testing.T) {
 func TestCreateWorkerNode(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	containerRuntime.ResetRunContainerCallLogs()
 	containerRuntime.ResetRunContainerCallLogs()
@@ -120,7 +119,7 @@ func TestCreateWorkerNode(t *testing.T) {
 func TestCreateExternalLoadBalancerNode(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	containerRuntime.ResetRunContainerCallLogs()
 	containerRuntime.ResetRunContainerCallLogs()
diff --git a/test/infrastructure/docker/internal/docker/types/node_test.go b/test/infrastructure/docker/internal/docker/types/node_test.go
index c93325c5cbc9..5f197ac5b8f3 100644
--- a/test/infrastructure/docker/internal/docker/types/node_test.go
+++ b/test/infrastructure/docker/internal/docker/types/node_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package types
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -28,7 +27,7 @@ import (
 func TestIP(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	node := &Node{
 		Name: "TestNode",
@@ -44,7 +43,7 @@ func TestIP(t *testing.T) {
 func TestDeleteContainer(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	node := &Node{
 		Name: "TestNode",
@@ -63,7 +62,7 @@ func TestDeleteContainer(t *testing.T) {
 func TestKillContainer(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	node := &Node{
 		Name: "TestNode",
@@ -83,7 +82,7 @@ func TestKillContainer(t *testing.T) {
 func TestCommandRun(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	containerRuntime.ResetExecContainerCallLogs()
 	cmd := GetContainerCmder("TestContainer").Command("test", "one", "two")
@@ -101,7 +100,7 @@ func TestCommandRun(t *testing.T) {
 func TestWriteFile(t *testing.T) {
 	g := NewWithT(t)
 	containerRuntime := &container.FakeRuntime{}
-	ctx := container.RuntimeInto(context.Background(), containerRuntime)
+	ctx := container.RuntimeInto(t.Context(), containerRuntime)
 	containerRuntime.ResetExecContainerCallLogs()
diff --git a/test/infrastructure/docker/internal/webhooks/devmachinetemplate_webhook_test.go b/test/infrastructure/docker/internal/webhooks/devmachinetemplate_webhook_test.go
index bb53d94e72d7..de9e0651919b 100644
--- a/test/infrastructure/docker/internal/webhooks/devmachinetemplate_webhook_test.go
+++ b/test/infrastructure/docker/internal/webhooks/devmachinetemplate_webhook_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package webhooks
 import (
-	"context"
 	"strings"
 	"testing"
@@ -117,7 +116,7 @@ func TestDevMachineTemplateInvalid(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
 			wh := &DevMachineTemplate{}
-			ctx := context.Background()
+			ctx := t.Context()
 			if tt.req != nil {
 				ctx = admission.NewContextWithRequest(ctx, *tt.req)
 			}
diff --git a/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go b/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go
index 05095ce91fb2..1303643e9ad2 100644
--- a/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go
+++ b/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package webhooks
 import (
-	"context"
 	"strings"
 	"testing"
@@ -111,7 +110,7 @@ func TestDockerMachineTemplateInvalid(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
 			wh := &DockerMachineTemplate{}
-			ctx := context.Background()
+			ctx := t.Context()
 			if tt.req != nil {
 				ctx = admission.NewContextWithRequest(ctx, *tt.req)
 			}
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go
index ac56ee1ceefb..61af4daf85c0 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go
@@ -60,7 +60,7 @@ func Test_cache_scale(t *testing.T) {
 		deleteCount atomic.Uint64
 	)
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel := context.WithCancel(t.Context())
 	defer cancel()
 	c := NewCache(scheme).(*cache)
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go
index de8909e76811..8fabc1e441e4 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go
@@ -38,7 +38,7 @@ func Test_cache_client(t *testing.T) {
 		c := NewCache(scheme).(*cache)
 		h := &fakeHandler{}
-		iMachine, err := c.GetInformer(context.TODO(), &cloudv1.CloudMachine{})
+		iMachine, err := c.GetInformer(t.Context(), &cloudv1.CloudMachine{})
 		g.Expect(err).ToNot(HaveOccurred())
 		err = iMachine.AddEventHandler(h)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -340,7 +340,7 @@ func Test_cache_client(t *testing.T) {
 		c := NewCache(scheme).(*cache)
 		h := &fakeHandler{}
-		i, err := c.GetInformer(context.TODO(), &cloudv1.CloudMachine{})
+		i, err := c.GetInformer(t.Context(), &cloudv1.CloudMachine{})
 		g.Expect(err).ToNot(HaveOccurred())
 		err = i.AddEventHandler(h)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -552,7 +552,7 @@ func Test_cache_client(t *testing.T) {
 		c := NewCache(scheme).(*cache)
 		h := &fakeHandler{}
-		i, err := c.GetInformer(context.TODO(), &cloudv1.CloudMachine{})
+		i, err := c.GetInformer(t.Context(), &cloudv1.CloudMachine{})
 		g.Expect(err).ToNot(HaveOccurred())
 		err = i.AddEventHandler(h)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -637,7 +637,7 @@ func Test_cache_client(t *testing.T) {
 	t.Run("delete with finalizers", func(t *testing.T) {
 		g := NewWithT(t)
-		ctx, cancel := context.WithCancel(context.TODO())
+		ctx, cancel := context.WithCancel(t.Context())
 		defer cancel()
 		c.garbageCollectorQueue = workqueue.NewTypedRateLimitingQueue[any](workqueue.DefaultTypedControllerRateLimiter[any]())
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/gc_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/gc_test.go
index f00f692e8852..b701108946e1 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/gc_test.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/gc_test.go
@@ -31,7 +31,7 @@ import (
 func Test_cache_gc(t *testing.T) {
 	g := NewWithT(t)
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel := context.WithCancel(t.Context())
 	defer cancel()
 	c := NewCache(scheme).(*cache)
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/sync_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/sync_test.go
index c9498eb32480..fe119a9cfc99 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/sync_test.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/sync_test.go
@@ -31,7 +31,7 @@ import (
 func Test_cache_sync(t *testing.T) {
 	g := NewWithT(t)
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel := context.WithCancel(t.Context())
 	defer cancel()
 	c := NewCache(scheme).(*cache)
diff --git a/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go b/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go
index 9bff3830b134..90e7a70f3af7 100644
--- a/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go
+++ b/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package etcd
 import (
-	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -41,7 +40,7 @@ func Test_etcd_scalingflow(t *testing.T) {
 	// During a scale down event - for example during upgrade - KCP will call `MoveLeader` and `MemberRemove` in sequence.
 	g := NewWithT(t)
-	ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{":authority": "etcd-1"}))
+	ctx := metadata.NewIncomingContext(t.Context(), metadata.New(map[string]string{":authority": "etcd-1"}))
 	manager := inmemoryruntime.NewManager(scheme)
 	resourceGroupResolver := func(string) (string, error) { return "group1", nil }
 	c := &clusterServerServer{
diff --git a/test/infrastructure/inmemory/pkg/server/mux_test.go b/test/infrastructure/inmemory/pkg/server/mux_test.go
index 31c1f304aa6f..5c43ab17c665 100644
--- a/test/infrastructure/inmemory/pkg/server/mux_test.go
+++ b/test/infrastructure/inmemory/pkg/server/mux_test.go
@@ -401,7 +401,7 @@ func TestAPI_corev1_Watch(t *testing.T) {
 		DebugPort: DefaultDebugPort + 4,
 	})
-	ctx := context.Background()
+	ctx := t.Context()
 	nodeWatcher, err := c.Watch(ctx, &corev1.NodeList{})
 	g.Expect(err).ToNot(HaveOccurred())
diff --git a/util/log/log_test.go b/util/log/log_test.go
index 265ea5394bb8..faa2a3a9b44f 100644
--- a/util/log/log_test.go
+++ b/util/log/log_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package log
 import (
-	"context"
 	"testing"
 	"github.com/go-logr/logr"
@@ -192,7 +191,7 @@ func Test_AddObjectHierarchy(t *testing.T) {
 				Build()
 			// Create fake log sink so we can later verify the added k/v pairs.
-			ctx := ctrl.LoggerInto(context.Background(), logr.New(&fakeLogSink{}))
+			ctx := ctrl.LoggerInto(t.Context(), logr.New(&fakeLogSink{}))
 			_, logger, err := AddOwners(ctx, c, tt.obj)
 			g.Expect(err).ToNot(HaveOccurred())
diff --git a/util/paused/paused_test.go b/util/paused/paused_test.go
index 1f3a62ab45f4..05e41a92c928 100644
--- a/util/paused/paused_test.go
+++ b/util/paused/paused_test.go
@@ -18,7 +18,6 @@ limitations under the License.
 package paused
 import (
-	"context"
 	"testing"
 	. "github.com/onsi/gomega"
@@ -101,7 +100,7 @@ func TestEnsurePausedCondition(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
-			ctx := context.Background()
+			ctx := t.Context()
 			c := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&clusterv1.Cluster{}, &builder.Phase1Obj{}).
 				WithObjects(tt.object, tt.cluster).Build()
diff --git a/util/util_test.go b/util/util_test.go
index cfc1a82db11f..ca766b7f9d30 100644
--- a/util/util_test.go
+++ b/util/util_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package util
 import (
-	"context"
 	"fmt"
 	"testing"
@@ -225,7 +224,7 @@ func TestClusterToInfrastructureMapFunc(t *testing.T) {
 			referenceObject.SetAPIVersion(tc.request.Spec.InfrastructureRef.APIVersion)
 			referenceObject.SetKind(tc.request.Spec.InfrastructureRef.Kind)
-			fn := ClusterToInfrastructureMapFunc(context.Background(), tc.input, clientBuilder.Build(), referenceObject)
+			fn := ClusterToInfrastructureMapFunc(t.Context(), tc.input, clientBuilder.Build(), referenceObject)
 			out := fn(ctx, tc.request)
 			g.Expect(out).To(BeComparableTo(tc.output))
 		})
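
Note on the pattern repeated throughout the hunks above: `testing.T.Context` (added in Go 1.24) returns a per-test context that is cancelled just before `Cleanup`-registered functions run, so it can replace `context.Background()` and `context.TODO()` in tests without leaking a context past the test's lifetime. The following is only a minimal sketch of that before/after shape; `doWork` is a hypothetical helper and is not part of this patch.

```go
package example

import (
	"context"
	"testing"
	"time"
)

// doWork stands in for any function under test that accepts a context.
func doWork(ctx context.Context) error {
	select {
	case <-ctx.Done():
		// The per-test context was cancelled before the work finished.
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return nil
	}
}

func TestDoWork(t *testing.T) {
	// Previously: ctx := context.Background() (never cancelled, outlives the test).
	// With Go 1.24+: t.Context() is scoped to this test and cancelled when it ends.
	ctx := t.Context()

	if err := doWork(ctx); err != nil {
		t.Fatalf("doWork() returned unexpected error: %v", err)
	}
}
```

Where a test still needs an explicit cancel, as in the inmemory cache tests above, `context.WithCancel(t.Context())` keeps that control while inheriting the per-test lifetime, which is why the `"context"` import remains in those files.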