diff --git a/controllers/cloudstackmachine_controller.go b/controllers/cloudstackmachine_controller.go
index 3f905f5c..bafcd286 100644
--- a/controllers/cloudstackmachine_controller.go
+++ b/controllers/cloudstackmachine_controller.go
@@ -48,12 +48,26 @@ var (
 	failuredomainMatcher = regexp.MustCompile(`ds\.meta_data\.failuredomain`)
 )
 
+const (
+	BootstrapDataNotReady                      = "Bootstrap DataSecretName not yet available"
+	CSMachineCreationSuccess                   = "CloudStack instance Created"
+	CSMachineCreationFailed                    = "Creating CloudStack machine failed: %s"
+	MachineInstanceRunning                     = "Machine instance is Running..."
+	MachineInErrorMessage                      = "CloudStackMachine VM in error state. Deleting associated Machine"
+	MachineNotReadyMessage                     = "Instance not ready, is %s"
+	CSMachineStateCheckerCreationFailed        = "error encountered when creating CloudStackMachineStateChecker"
+	CSMachineStateCheckerCreationSuccess       = "CloudStackMachineStateChecker created"
+	CSMachineDeletionMessage                   = "Deleting CloudStack Machine %s"
+	CSMachineDeletionInstanceIDNotFoundMessage = "Deleting CloudStack Machine %s instanceID not found"
+)
+
 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=cloudstackmachines,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=cloudstackmachines/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=cloudstackmachines/finalizers,verbs=update
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinesets,verbs=get;list;watch
 // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
 
 // CloudStackMachineReconciliationRunner is a ReconciliationRunner with extensions specific to CloudStack machine reconciliation.
 type CloudStackMachineReconciliationRunner struct {
@@ -186,7 +200,8 @@ func (r *CloudStackMachineReconciliationRunner) DeleteMachineIfFailuredomainNotE
 // Implicitly it also fetches its bootstrap secret in order to create said instance.
 func (r *CloudStackMachineReconciliationRunner) GetOrCreateVMInstance() (retRes ctrl.Result, reterr error) {
 	if r.CAPIMachine.Spec.Bootstrap.DataSecretName == nil {
-		return r.RequeueWithMessage("Bootstrap DataSecretName not yet available.")
+		r.Recorder.Event(r.ReconciliationSubject, "Normal", "Creating", BootstrapDataNotReady)
+		return r.RequeueWithMessage(BootstrapDataNotReady + ".")
 	}
 
 	r.Log.Info("Got Bootstrap DataSecretName.")
@@ -204,8 +219,12 @@
 	userData := processCustomMetadata(data, r)
 	err := r.CSUser.GetOrCreateVMInstance(r.ReconciliationSubject, r.CAPIMachine, r.CSCluster, r.FailureDomain, r.AffinityGroup, userData)
+	if err != nil {
+		r.Recorder.Eventf(r.ReconciliationSubject, "Warning", "Creating", CSMachineCreationFailed, err.Error())
+	}
 	if err == nil && !controllerutil.ContainsFinalizer(r.ReconciliationSubject, infrav1.MachineFinalizer) { // Fetched or Created?
-		r.Log.Info("CloudStack instance Created", "instanceStatus", r.ReconciliationSubject.Status)
+		r.Recorder.Eventf(r.ReconciliationSubject, "Normal", "Created", CSMachineCreationSuccess)
+		r.Log.Info(CSMachineCreationSuccess, "instanceStatus", r.ReconciliationSubject.Status)
 	}
 
 	// Always add the finalizer regardless. It can't be added twice anyway.
 	controllerutil.AddFinalizer(r.ReconciliationSubject, infrav1.MachineFinalizer)
@@ -223,16 +242,19 @@ func processCustomMetadata(data []byte, r *CloudStackMachineReconciliationRunner
 // ConfirmVMStatus checks the Instance's status for running state and requeues otherwise.
 func (r *CloudStackMachineReconciliationRunner) RequeueIfInstanceNotRunning() (retRes ctrl.Result, reterr error) {
 	if r.ReconciliationSubject.Status.InstanceState == "Running" {
-		r.Log.Info("Machine instance is Running...")
+		r.Recorder.Event(r.ReconciliationSubject, "Normal", "Running", MachineInstanceRunning)
+		r.Log.Info(MachineInstanceRunning)
 		r.ReconciliationSubject.Status.Ready = true
 	} else if r.ReconciliationSubject.Status.InstanceState == "Error" {
-		r.Log.Info("CloudStackMachine VM in error state. Deleting associated Machine.", "csMachine", r.ReconciliationSubject.GetName())
+		r.Recorder.Event(r.ReconciliationSubject, "Warning", "Error", MachineInErrorMessage)
+		r.Log.Info(MachineInErrorMessage, "csMachine", r.ReconciliationSubject.GetName())
 		if err := r.K8sClient.Delete(r.RequestCtx, r.CAPIMachine); err != nil {
 			return ctrl.Result{}, err
 		}
 		return ctrl.Result{RequeueAfter: utils.RequeueTimeout}, nil
 	} else {
-		r.Log.Info(fmt.Sprintf("Instance not ready, is %s.", r.ReconciliationSubject.Status.InstanceState))
+		r.Recorder.Eventf(r.ReconciliationSubject, "Warning", r.ReconciliationSubject.Status.InstanceState, MachineNotReadyMessage, r.ReconciliationSubject.Status.InstanceState)
+		r.Log.Info(fmt.Sprintf(MachineNotReadyMessage, r.ReconciliationSubject.Status.InstanceState))
 		return ctrl.Result{RequeueAfter: utils.RequeueTimeout}, nil
 	}
 	return ctrl.Result{}, nil
@@ -263,9 +285,10 @@ func (r *CloudStackMachineReconciliationRunner) GetOrCreateMachineStateChecker()
 	}
 
 	if err := r.K8sClient.Create(r.RequestCtx, csMachineStateChecker); err != nil && !utils.ContainsAlreadyExistsSubstring(err) {
-		return r.ReturnWrappedError(err, "error encountered when creating CloudStackMachineStateChecker")
+		r.Recorder.Eventf(r.ReconciliationSubject, "Warning", "Machine State Checker", CSMachineStateCheckerCreationFailed)
+		return r.ReturnWrappedError(err, CSMachineStateCheckerCreationFailed)
 	}
-
+	r.Recorder.Eventf(r.ReconciliationSubject, "Normal", "Machine State Checker", CSMachineStateCheckerCreationSuccess)
 	return r.GetObjectByName(*checkerName, r.StateChecker)()
 }
 
@@ -280,9 +303,12 @@ func (r *CloudStackMachineReconciliationRunner) ReconcileDelete() (retRes ctrl.R
 			fmt.Sprintf(" If this VM has already been deleted, please remove the finalizer named %s from object %s", "cloudstackmachine.infrastructure.cluster.x-k8s.io", r.ReconciliationSubject.Name))
 			// Cloudstack VM may be not found or more than one found by name
+			r.Recorder.Eventf(r.ReconciliationSubject, "Warning", "Deleting", CSMachineDeletionInstanceIDNotFoundMessage, r.ReconciliationSubject.Name)
+			r.Log.Error(err, fmt.Sprintf(CSMachineDeletionInstanceIDNotFoundMessage, r.ReconciliationSubject.Name))
 			return ctrl.Result{}, err
 		}
 	}
 
+	r.Recorder.Eventf(r.ReconciliationSubject, "Normal", "Deleting", CSMachineDeletionMessage, r.ReconciliationSubject.Name)
 	r.Log.Info("Deleting instance", "instance-id", r.ReconciliationSubject.Spec.InstanceID)
 	// Use CSClient instead of CSUser here to expunge as admin.
 	// The CloudStack-Go API does not return an error, but the VM won't delete with Expunge set if requested by
@@ -364,6 +390,7 @@ func (reconciler *CloudStackMachineReconciler) SetupWithManager(mgr ctrl.Manager
 		return err
 	}
 
+	reconciler.Recorder = mgr.GetEventRecorderFor("capc-machine-controller")
 	// Add a watch on CAPI Cluster objects for unpause and ready events.
 	return controller.Watch(
 		&source.Kind{Type: &clusterv1.Cluster{}},
diff --git a/controllers/cloudstackmachine_controller_test.go b/controllers/cloudstackmachine_controller_test.go
index c81ea259..ab3d4b6e 100644
--- a/controllers/cloudstackmachine_controller_test.go
+++ b/controllers/cloudstackmachine_controller_test.go
@@ -32,6 +32,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"strings"
 )
 
 var _ = Describe("CloudStackMachineReconciler", func() {
@@ -50,7 +51,7 @@ var _ = Describe("CloudStackMachineReconciler", func() {
 
 		// Setup a failure domain for the machine reconciler to find.
 		Ω(k8sClient.Create(ctx, dummies.CSFailureDomain1)).Should(Succeed())
-		setClusterReady()
+		setClusterReady(k8sClient)
 	})
 
 	It("Should call GetOrCreateVMInstance and set Status.Ready to true", func() {
@@ -197,8 +198,9 @@ var _ = Describe("CloudStackMachineReconciler", func() {
 
 	Context("With a fake ctrlRuntimeClient and no test Env at all.", func() {
 		BeforeEach(func() {
-			dummies.SetDummyVars()
 			setupFakeTestClient()
+			dummies.CSCluster.Spec.FailureDomains = dummies.CSCluster.Spec.FailureDomains[:1]
+			dummies.CSCluster.Spec.FailureDomains[0].Name = dummies.CSFailureDomain1.Spec.Name
 		})
 
 		It("Should exit having not found a failure domain to place the machine in.", func() {
@@ -219,5 +221,46 @@
 			Ω(err).ShouldNot(HaveOccurred())
 			Ω(res.RequeueAfter).ShouldNot(BeZero())
 		})
+
+		It("Should create event Machine instance is Running", func() {
+			key := client.ObjectKeyFromObject(dummies.CSCluster)
+			dummies.CAPIMachine.Name = "someMachine"
+			dummies.CAPIMachine.Spec.Bootstrap.DataSecretName = &dummies.BootstrapSecret.Name
+			dummies.CSMachine1.OwnerReferences = append(dummies.CSMachine1.OwnerReferences, metav1.OwnerReference{
+				Kind:       "Machine",
+				APIVersion: clusterv1.GroupVersion.String(),
+				Name:       dummies.CAPIMachine.Name,
+				UID:        "uniqueness",
+			})
+			mockCloudClient.EXPECT().GetOrCreateVMInstance(
+				gomock.Any(), gomock.Any(), gomock.Any(),
+				gomock.Any(), gomock.Any(), gomock.Any()).Do(
+				func(arg1, _, _, _, _, _ interface{}) {
+					arg1.(*infrav1.CloudStackMachine).Status.InstanceState = "Running"
+				}).AnyTimes()
+			Ω(fakeCtrlClient.Get(ctx, key, dummies.CSCluster)).Should(Succeed())
+			Ω(fakeCtrlClient.Create(ctx, dummies.CAPIMachine)).Should(Succeed())
+			Ω(fakeCtrlClient.Create(ctx, dummies.CSMachine1)).Should(Succeed())
+			Ω(fakeCtrlClient.Create(ctx, dummies.CSFailureDomain1)).Should(Succeed())
+			Ω(fakeCtrlClient.Create(ctx, dummies.ACSEndpointSecret1)).Should(Succeed())
+			Ω(fakeCtrlClient.Create(ctx, dummies.BootstrapSecret)).Should(Succeed())
+
+			setClusterReady(fakeCtrlClient)
+
+			requestNamespacedName := types.NamespacedName{Namespace: dummies.ClusterNameSpace, Name: dummies.CSMachine1.Name}
+			MachineReconciler.AsFailureDomainUser(&dummies.CSFailureDomain1.Spec)
+			res, err := MachineReconciler.Reconcile(ctx, ctrl.Request{NamespacedName: requestNamespacedName})
+			Ω(err).ShouldNot(HaveOccurred())
+			Ω(res.RequeueAfter).Should(BeZero())
+
+			Eventually(func() bool {
+				for event := range fakeRecorder.Events {
+					return strings.Contains(event, "Normal Created CloudStack instance Created") ||
+						strings.Contains(event, "Normal Running Machine instance is Running...") ||
+						strings.Contains(event, "Normal Machine State Checker CloudStackMachineStateChecker created")
+				}
+				return false
+			}, timeout).Should(BeTrue())
+		})
 	})
 })
diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go
index bd60e2a9..b6cec1c4 100644
--- a/controllers/controllers_suite_test.go
+++ b/controllers/controllers_suite_test.go
@@ -21,6 +21,7 @@ import (
 	"flag"
 	"fmt"
 	"go/build"
+	"k8s.io/client-go/tools/record"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -69,8 +70,9 @@ var (
 )
 
 const (
-	timeout      = 10 * time.Second
-	pollInterval = 1 * time.Second
+	timeout             = 10 * time.Second
+	pollInterval        = 1 * time.Second
+	fakeEventBufferSize = 10
 )
 
 func envOr(envKey, defaultValue string) string {
@@ -114,6 +116,7 @@ var (
 	cfg            *rest.Config
 	logger         logr.Logger
 	fakeCtrlClient client.Client
+	fakeRecorder   *record.FakeRecorder
 
 	// Mock Vars.
 	mockCtrl *gomock.Controller
@@ -254,17 +257,20 @@ func setupFakeTestClient() {
 
 	// Make a fake k8s client with CloudStack and CAPI cluster.
 	fakeCtrlClient = fake.NewClientBuilder().WithObjects(dummies.CSCluster, dummies.CAPICluster).Build()
-
+	fakeRecorder = record.NewFakeRecorder(fakeEventBufferSize)
 	// Setup mock clients.
 	mockCSAPIClient = cloudstack.NewMockClient(mockCtrl)
 	mockCloudClient = mocks.NewMockClient(mockCtrl)
 
 	// Base reconciler shared across reconcilers.
 	base := csCtrlrUtils.ReconcilerBase{
-		K8sClient:  fakeCtrlClient,
-		Scheme:     scheme.Scheme,
-		CSClient:   mockCloudClient,
-		BaseLogger: logger}
+		K8sClient:            fakeCtrlClient,
+		Scheme:               scheme.Scheme,
+		CSClient:             mockCloudClient,
+		BaseLogger:           logger,
+		Recorder:             fakeRecorder,
+		CloudClientExtension: &MockCtrlrCloudClientImplementation{},
+	}
 
 	ctx, cancel = context.WithCancel(context.TODO())
 
@@ -312,9 +318,9 @@ var _ = AfterEach(func() {
 var _ = AfterSuite(func() {})
 
 // setClusterReady patches the clsuter with ready status true.
-func setClusterReady() {
+func setClusterReady(client client.Client) {
 	Eventually(func() error {
-		ph, err := patch.NewHelper(dummies.CSCluster, k8sClient)
+		ph, err := patch.NewHelper(dummies.CSCluster, client)
 		Ω(err).ShouldNot(HaveOccurred())
 		dummies.CSCluster.Status.Ready = true
 		return ph.Patch(ctx, dummies.CSCluster, patch.WithStatusObservedGeneration{})
diff --git a/controllers/utils/base_reconciler.go b/controllers/utils/base_reconciler.go
index fa1a97e1..17eb697b 100644
--- a/controllers/utils/base_reconciler.go
+++ b/controllers/utils/base_reconciler.go
@@ -19,6 +19,7 @@ package utils
 
 import (
 	"context"
 	"fmt"
+	"k8s.io/client-go/tools/record"
 	"strings"
 	"time"
@@ -46,6 +47,7 @@ type ReconcilerBase struct {
 	Scheme     *runtime.Scheme
 	K8sClient  client.Client
 	CSClient   cloud.Client
+	Recorder   record.EventRecorder
 	CloudClientExtension
 }
 
@@ -453,6 +455,7 @@ func (r *ReconcilerBase) InitFromMgr(mgr ctrl.Manager, client cloud.Client) {
 	r.K8sClient = mgr.GetClient()
 	r.BaseLogger = ctrl.Log.WithName("controllers")
 	r.Scheme = mgr.GetScheme()
+	r.Recorder = mgr.GetEventRecorderFor("capc-controller-manager")
 	r.CSClient = client
 }
 
diff --git a/main.go b/main.go
index 7e0f985a..b7dd3f0f 100644
--- a/main.go
+++ b/main.go
@@ -148,6 +148,7 @@ func main() {
 	base := utils.ReconcilerBase{
 		K8sClient:  mgr.GetClient(),
 		BaseLogger: ctrl.Log.WithName("controllers"),
+		Recorder:   mgr.GetEventRecorderFor("capc-controller-manager"),
 		Scheme:     mgr.GetScheme()}
 
 	setupReconcilers(base, mgr)