diff --git a/api/v1alpha1/etcd_types.go b/api/v1alpha1/etcd_types.go index cf4b3a27..b43b745b 100644 --- a/api/v1alpha1/etcd_types.go +++ b/api/v1alpha1/etcd_types.go @@ -17,41 +17,106 @@ limitations under the License. package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// NOTE: json tags are required. Any new fields you add must have json tags for +// the fields to be serialized. -// EtcdSpec defines the desired state of Etcd +// EtcdSpec defines the desired state of an Etcd cluster managed by the operator type EtcdSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - // The following markers will use OpenAPI v3 schema to validate the value - // More info: https://book.kubebuilder.io/reference/markers/crd-validation.html + // CellName is the name of the cell this Etcd belongs to. + // +kubebuilder:validation:MinLength=1 + // +optional + CellName string `json:"cellName,omitempty"` + + // Image is the container image for Etcd. + // NOTE: The version information is taken from Multigres repo's local + // provisioning setup: + // https://github.com/multigres/multigres/blob/38264ed3cb5049961a1e3d8a9de4836f8215ca76/go/provisioner/local/config.go#L186 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:default="gcr.io/etcd-development/etcd:v3.5.9" + // +optional + Image string `json:"image,omitempty"` + + // ImagePullSecrets is an optional list of references to secrets in the same namespace + // to use for pulling the image. + // +optional + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // Replicas is the desired number of Etcd members. + // For high availability, use an odd number (typically 3 or 5). + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=3 + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // Resources defines the resource requirements for the Etcd container. + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // ServiceAccountName is the name of the ServiceAccount to use for the Etcd pods. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // StorageClassName is the name of the StorageClass to use for Etcd data volumes. + // If not specified, the default StorageClass will be used. + // +optional + StorageClassName *string `json:"storageClassName,omitempty"` + + // StorageSize is the size of the persistent volume for each Etcd member. + // +kubebuilder:default="10Gi" + // +optional + StorageSize string `json:"storageSize,omitempty"` + + // VolumeClaimTemplate allows customization of the PersistentVolumeClaim for Etcd data. + // If specified, this takes precedence over StorageClassName and StorageSize. + // +optional + VolumeClaimTemplate *corev1.PersistentVolumeClaimSpec `json:"volumeClaimTemplate,omitempty"` + + // Affinity defines pod affinity and anti-affinity rules. + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Tolerations allows pods to schedule onto nodes with matching taints. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // NodeSelector is a selector which must be true for the pod to fit on a node. 
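+	// For example, a selector such as {"topology.kubernetes.io/zone": "zone1"}
+	// restricts scheduling to nodes carrying that label.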
+ // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // foo is an example field of Etcd. Edit etcd_types.go to remove/update + // TopologySpreadConstraints controls how pods are spread across topology domains. // +optional - Foo *string `json:"foo,omitempty"` + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // PodAnnotations are annotations to add to the Etcd pods. + // +optional + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + + // PodLabels are additional labels to add to the Etcd pods. + // These are merged with the standard labels generated by the operator. + // In case of a key conflict, the operator's standard labels take precedence. + // +optional + PodLabels map[string]string `json:"podLabels,omitempty"` } // EtcdStatus defines the observed state of Etcd. type EtcdStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // For Kubernetes API conventions, see: - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties - - // conditions represent the current state of the Etcd resource. - // Each condition has a unique type and reflects the status of a specific aspect of the resource. - // - // Standard condition types include: - // - "Available": the resource is fully functional - // - "Progressing": the resource is being created or updated - // - "Degraded": the resource failed to reach or maintain its desired state - // - // The status of each condition is one of True, False, or Unknown. + // Ready indicates whether the Etcd cluster is healthy and available. + Ready bool `json:"ready"` + + // Replicas is the desired number of Etcd members. + Replicas int32 `json:"replicas"` + + // ReadyReplicas is the number of ready Etcd members. + ReadyReplicas int32 `json:"readyReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed Etcd spec. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions represent the latest available observations of the Etcd cluster's state. 
// +listType=map // +listMapKey=type // +optional @@ -60,6 +125,10 @@ type EtcdStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Ready",type=boolean,JSONPath=`.status.ready` +// +kubebuilder:printcolumn:name="DesiredReplicas",type=string,JSONPath=`.status.replicas` +// +kubebuilder:printcolumn:name="ReadyReplicas",type=string,JSONPath=`.status.readyReplicas` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // Etcd is the Schema for the etcds API type Etcd struct { diff --git a/pkg/resource-handler/controller/etcd/etcd_controller.go b/pkg/resource-handler/controller/etcd/etcd_controller.go new file mode 100644 index 00000000..bd49c0ba --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_controller.go @@ -0,0 +1,267 @@ +package etcd + +import ( + "context" + "fmt" + "slices" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + finalizerName = "etcd.multigres.com/finalizer" +) + +// EtcdReconciler reconciles an Etcd object. +type EtcdReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=multigres.com,resources=etcds,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=multigres.com,resources=etcds/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=multigres.com,resources=etcds/finalizers,verbs=update +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete + +// Reconcile handles Etcd resource reconciliation. 
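+// It ensures the etcd StatefulSet, headless Service, and client Service exist
+// and match the desired spec, then refreshes the Etcd status from the observed
+// StatefulSet.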
+func (r *EtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Fetch the Etcd instance + etcd := &multigresv1alpha1.Etcd{} + if err := r.Get(ctx, req.NamespacedName, etcd); err != nil { + if errors.IsNotFound(err) { + logger.Info("Etcd resource not found, ignoring") + return ctrl.Result{}, nil + } + logger.Error(err, "Failed to get Etcd") + return ctrl.Result{}, err + } + + // Handle deletion + if !etcd.DeletionTimestamp.IsZero() { + return r.handleDeletion(ctx, etcd) + } + + // Add finalizer if not present + if !slices.Contains(etcd.Finalizers, finalizerName) { + etcd.Finalizers = append(etcd.Finalizers, finalizerName) + if err := r.Update(ctx, etcd); err != nil { + logger.Error(err, "Failed to add finalizer") + return ctrl.Result{}, err + } + } + + // Reconcile StatefulSet + if err := r.reconcileStatefulSet(ctx, etcd); err != nil { + logger.Error(err, "Failed to reconcile StatefulSet") + return ctrl.Result{}, err + } + + // Reconcile headless Service + if err := r.reconcileHeadlessService(ctx, etcd); err != nil { + logger.Error(err, "Failed to reconcile headless Service") + return ctrl.Result{}, err + } + + // Reconcile client Service + if err := r.reconcileClientService(ctx, etcd); err != nil { + logger.Error(err, "Failed to reconcile client Service") + return ctrl.Result{}, err + } + + // Update status + if err := r.updateStatus(ctx, etcd); err != nil { + logger.Error(err, "Failed to update status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// handleDeletion handles cleanup when Etcd is being deleted. +func (r *EtcdReconciler) handleDeletion(ctx context.Context, etcd *multigresv1alpha1.Etcd) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + if slices.Contains(etcd.Finalizers, finalizerName) { + // Perform cleanup if needed + // Currently no special cleanup required - owner references handle resource deletion + + // Remove finalizer + etcd.Finalizers = slices.DeleteFunc(etcd.Finalizers, func(s string) bool { + return s == finalizerName + }) + if err := r.Update(ctx, etcd); err != nil { + logger.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +// reconcileStatefulSet creates or updates the StatefulSet for Etcd. +func (r *EtcdReconciler) reconcileStatefulSet(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + desired, err := BuildStatefulSet(etcd, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build StatefulSet: %w", err) + } + + existing := &appsv1.StatefulSet{} + err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new StatefulSet + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create StatefulSet: %w", err) + } + return nil + } + return fmt.Errorf("failed to get StatefulSet: %w", err) + } + + // Update existing StatefulSet + existing.Spec = desired.Spec + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update StatefulSet: %w", err) + } + + return nil +} + +// reconcileHeadlessService creates or updates the headless Service for Etcd. 
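+// The headless Service is named "<etcd-name>-headless" and provides the stable
+// per-pod DNS records used by the etcd peer and advertise URLs.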
+func (r *EtcdReconciler) reconcileHeadlessService(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + desired, err := BuildHeadlessService(etcd, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build headless Service: %w", err) + } + + existing := &corev1.Service{} + err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name + "-headless"}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new Service + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create headless Service: %w", err) + } + return nil + } + return fmt.Errorf("failed to get headless Service: %w", err) + } + + // Update existing Service + existing.Spec.Ports = desired.Spec.Ports + existing.Spec.Selector = desired.Spec.Selector + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update headless Service: %w", err) + } + + return nil +} + +// reconcileClientService creates or updates the client Service for Etcd. +func (r *EtcdReconciler) reconcileClientService(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + desired, err := BuildClientService(etcd, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build client Service: %w", err) + } + + existing := &corev1.Service{} + err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new Service + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create client Service: %w", err) + } + return nil + } + return fmt.Errorf("failed to get client Service: %w", err) + } + + // Update existing Service + existing.Spec.Ports = desired.Spec.Ports + existing.Spec.Selector = desired.Spec.Selector + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update client Service: %w", err) + } + + return nil +} + +// updateStatus updates the Etcd status based on observed state. +func (r *EtcdReconciler) updateStatus(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + // Get the StatefulSet to check status + sts := &appsv1.StatefulSet{} + err := r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, sts) + if err != nil { + if errors.IsNotFound(err) { + // StatefulSet not created yet + return nil + } + return fmt.Errorf("failed to get StatefulSet for status: %w", err) + } + + // Update status fields + etcd.Status.Replicas = sts.Status.Replicas + etcd.Status.ReadyReplicas = sts.Status.ReadyReplicas + etcd.Status.Ready = sts.Status.ReadyReplicas == sts.Status.Replicas && sts.Status.Replicas > 0 + etcd.Status.ObservedGeneration = etcd.Generation + + // Update conditions + etcd.Status.Conditions = r.buildConditions(etcd, sts) + + if err := r.Status().Update(ctx, etcd); err != nil { + return fmt.Errorf("failed to update status: %w", err) + } + + return nil +} + +// buildConditions creates status conditions based on observed state. 
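+// Currently a single "Ready" condition is reported, mirroring the
+// ReadyReplicas/Replicas comparison used for Status.Ready.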
+func (r *EtcdReconciler) buildConditions(etcd *multigresv1alpha1.Etcd, sts *appsv1.StatefulSet) []metav1.Condition { + conditions := []metav1.Condition{} + + // Ready condition + readyCondition := metav1.Condition{ + Type: "Ready", + ObservedGeneration: etcd.Generation, + LastTransitionTime: metav1.Now(), + } + + if sts.Status.ReadyReplicas == sts.Status.Replicas && sts.Status.Replicas > 0 { + readyCondition.Status = metav1.ConditionTrue + readyCondition.Reason = "AllReplicasReady" + readyCondition.Message = fmt.Sprintf("All %d replicas are ready", sts.Status.ReadyReplicas) + } else { + readyCondition.Status = metav1.ConditionFalse + readyCondition.Reason = "NotAllReplicasReady" + readyCondition.Message = fmt.Sprintf("%d/%d replicas ready", sts.Status.ReadyReplicas, sts.Status.Replicas) + } + + conditions = append(conditions, readyCondition) + return conditions +} + +// SetupWithManager sets up the controller with the Manager. +// TODO: This is missing test coverage, and will need to use envtest setup. +func (r *EtcdReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&multigresv1alpha1.Etcd{}). + Owns(&appsv1.StatefulSet{}). + Owns(&corev1.Service{}). + Complete(r) +} diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go new file mode 100644 index 00000000..ca7f04c0 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go @@ -0,0 +1,235 @@ +package etcd + +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" +) + +// TestReconcileStatefulSet_InvalidScheme tests the error path when BuildStatefulSet fails. +// This should never happen in production - scheme is properly set up in main.go. +// Test exists for coverage of defensive error handling. +func TestReconcileStatefulSet_InvalidScheme(t *testing.T) { + // Empty scheme without Etcd type registered + invalidScheme := runtime.NewScheme() + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileStatefulSet(context.Background(), etcd) + if err == nil { + t.Error("reconcileStatefulSet() should error with invalid scheme") + } +} + +// TestReconcileHeadlessService_InvalidScheme tests the error path when BuildHeadlessService fails. +func TestReconcileHeadlessService_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). 
+ Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileHeadlessService(context.Background(), etcd) + if err == nil { + t.Error("reconcileHeadlessService() should error with invalid scheme") + } +} + +// TestReconcileClientService_InvalidScheme tests the error path when BuildClientService fails. +func TestReconcileClientService_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileClientService(context.Background(), etcd) + if err == nil { + t.Error("reconcileClientService() should error with invalid scheme") + } +} + +// TestUpdateStatus_StatefulSetNotFound tests the NotFound path in updateStatus. +func TestUpdateStatus_StatefulSetNotFound(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) // Need StatefulSet type registered for Get to work + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd). + WithStatusSubresource(&multigresv1alpha1.Etcd{}). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Call updateStatus when StatefulSet doesn't exist yet + err := reconciler.updateStatus(context.Background(), etcd) + if err != nil { + t.Errorf("updateStatus() should not error when StatefulSet not found, got: %v", err) + } +} + +// TestHandleDeletion_NoFinalizer tests early return when no finalizer is present. +func TestHandleDeletion_NoFinalizer(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{}, // No finalizer + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + result, err := reconciler.handleDeletion(context.Background(), etcd) + if err != nil { + t.Errorf("handleDeletion() should not error when no finalizer, got: %v", err) + } + if result.Requeue { + t.Error("handleDeletion() should not requeue when no finalizer") + } +} + +// TestReconcileClientService_GetError tests error path on Get client Service (not NotFound). +func TestReconcileClientService_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + // Create client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd). 
+ Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + }) + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcileClientService(context.Background(), etcd) + if err == nil { + t.Error("reconcileClientService() should error on Get failure") + } +} + +// TestUpdateStatus_GetError tests error path on Get StatefulSet (not NotFound). +func TestUpdateStatus_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd). + WithStatusSubresource(&multigresv1alpha1.Etcd{}). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + }) + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.updateStatus(context.Background(), etcd) + if err == nil { + t.Error("updateStatus() should error on Get failure") + } +} diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go new file mode 100644 index 00000000..ccece394 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -0,0 +1,715 @@ +package etcd + +import ( + "slices" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" +) + +func TestEtcdReconciler_Reconcile(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + existingObjects []client.Object + failureConfig *testutil.FailureConfig + // TODO: If wantErr is false but failureConfig is set, assertions may fail + // due to failure injection. This should be addressed when we need to test + // partial failures that don't prevent reconciliation success. 
+ wantErr bool + wantRequeue bool + assertFunc func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) + }{ + ////---------------------------------------- + /// Success + //------------------------------------------ + "create all resources for new Etcd": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + // Verify all three resources were created + sts := &appsv1.StatefulSet{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd", Namespace: "default"}, + sts); err != nil { + t.Errorf("StatefulSet should exist: %v", err) + } + + headlessSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd-headless", Namespace: "default"}, + headlessSvc); err != nil { + t.Errorf("Headless Service should exist: %v", err) + } + + clientSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd", Namespace: "default"}, + clientSvc); err != nil { + t.Errorf("Client Service should exist: %v", err) + } + + // Verify defaults and finalizer + if *sts.Spec.Replicas != DefaultReplicas { + t.Errorf("StatefulSet replicas = %d, want %d", *sts.Spec.Replicas, DefaultReplicas) + } + + updatedEtcd := &multigresv1alpha1.Etcd{} + if err := c.Get(t.Context(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, updatedEtcd); err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + if !slices.Contains(updatedEtcd.Finalizers, finalizerName) { + t.Errorf("Finalizer should be added") + } + }, + }, + "update existing resources": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(5), + Image: "quay.io/coreos/etcd:v3.5.15", + }, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: int32Ptr(3), // will be updated to 5 + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd-headless", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + sts := &appsv1.StatefulSet{} + err := c.Get(t.Context(), types.NamespacedName{ + Name: "existing-etcd", + Namespace: "default", + }, sts) + if err != nil { + t.Fatalf("Failed to get StatefulSet: %v", err) + } + + if *sts.Spec.Replicas != 5 { + t.Errorf("StatefulSet replicas = %d, want 5", *sts.Spec.Replicas) + } + + if sts.Spec.Template.Spec.Containers[0].Image != "quay.io/coreos/etcd:v3.5.15" { + t.Errorf("StatefulSet image = %s, want quay.io/coreos/etcd:v3.5.15", sts.Spec.Template.Spec.Containers[0].Image) + } + }, + }, + "etcd with cellName": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone1", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{ + CellName: "zone1", + }, + }, + existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + sts := 
&appsv1.StatefulSet{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, + sts); err != nil { + t.Fatalf("Failed to get StatefulSet: %v", err) + } + if sts.Labels["multigres.com/cell"] != "zone1" { + t.Errorf("StatefulSet cell label = %s, want zone1", sts.Labels["multigres.com/cell"]) + } + + headlessSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "etcd-zone1-headless", Namespace: "default"}, + headlessSvc); err != nil { + t.Fatalf("Failed to get headless Service: %v", err) + } + if headlessSvc.Labels["multigres.com/cell"] != "zone1" { + t.Errorf("Headless Service cell label = %s, want zone1", headlessSvc.Labels["multigres.com/cell"]) + } + + clientSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, + clientSvc); err != nil { + t.Fatalf("Failed to get client Service: %v", err) + } + if clientSvc.Labels["multigres.com/cell"] != "zone1" { + t.Errorf("Client Service cell label = %s, want zone1", clientSvc.Labels["multigres.com/cell"]) + } + }, + }, + "deletion with finalizer": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + }, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + updatedEtcd := &multigresv1alpha1.Etcd{} + err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd-deletion", Namespace: "default"}, + updatedEtcd) + if err == nil { + t.Errorf("Etcd object should be deleted but still exists (finalizers: %v)", updatedEtcd.Finalizers) + } + }, + }, + "all replicas ready status": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-ready", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(3), + }, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-ready", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: int32Ptr(3), + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + updatedEtcd := &multigresv1alpha1.Etcd{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd-ready", Namespace: "default"}, + updatedEtcd); err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + + if !updatedEtcd.Status.Ready { + t.Error("Status.Ready should be true") + } + if updatedEtcd.Status.Replicas != 3 { + t.Errorf("Status.Replicas = %d, want 3", updatedEtcd.Status.Replicas) + } + if updatedEtcd.Status.ReadyReplicas != 3 { + t.Errorf("Status.ReadyReplicas = %d, want 3", updatedEtcd.Status.ReadyReplicas) + } + if len(updatedEtcd.Status.Conditions) == 0 { + t.Error("Status.Conditions should not be empty") + } else { + readyCondition := updatedEtcd.Status.Conditions[0] + if readyCondition.Type != "Ready" { + t.Errorf("Condition type = %s, want Ready", readyCondition.Type) + } + if 
readyCondition.Status != metav1.ConditionTrue { + t.Errorf("Condition status = %s, want True", readyCondition.Status) + } + } + + if !slices.Contains(updatedEtcd.Finalizers, finalizerName) { + t.Errorf("Finalizer should be present") + } + }, + }, + ////---------------------------------------- + /// Error + //------------------------------------------ + "error on StatefulSet create": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if _, ok := obj.(*appsv1.StatefulSet); ok { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on headless Service create": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd-headless" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on client Service create": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on status update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnStatusUpdate: testutil.FailOnObjectName("test-etcd", testutil.ErrInjected), + }, + wantErr: true, + }, + "error on Get Etcd": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "error on finalizer Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-etcd", testutil.ErrInjected), + }, + wantErr: true, + }, + "error on StatefulSet Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(5), + }, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: int32Ptr(3), + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if _, ok := obj.(*appsv1.StatefulSet); ok { + return testutil.ErrInjected + } + return nil + }, + }, + 
wantErr: true, + }, + "error on headless Service Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-headless", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd-headless" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on client Service Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-headless", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get StatefulSet in updateStatus": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-status", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-status", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + // Fail StatefulSet Get after first successful call + // First Get succeeds (in reconcileStatefulSet) + // Second Get fails (in updateStatus) + OnGet: testutil.FailKeyAfterNCalls(1, testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "error on Get StatefulSet (not NotFound)": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + // Fail StatefulSet Get with non-NotFound error + if key.Name == "test-etcd" { + return testutil.ErrNetworkTimeout + } + return nil + }, + }, + wantErr: true, + }, + "error on Get headless Service (not NotFound)": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + // Fail headless Service Get with non-NotFound error + if key.Name == "test-etcd-headless" { + return testutil.ErrNetworkTimeout + } + return nil + }, + }, + wantErr: true, + }, + "error on Get client Service (not 
NotFound)": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-svc", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-svc", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-svc-headless", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnNamespacedKeyName("test-etcd-svc", "default", testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "deletion error on finalizer removal": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-etcd-del", testutil.ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Create base fake client + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tc.existingObjects...). + WithStatusSubresource(&multigresv1alpha1.Etcd{}). + Build() + + fakeClient := client.Client(baseClient) + // Wrap with failure injection if configured + if tc.failureConfig != nil { + fakeClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) + } + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Create the Etcd resource if not in existing objects + etcdInExisting := false + for _, obj := range tc.existingObjects { + if etcd, ok := obj.(*multigresv1alpha1.Etcd); ok && etcd.Name == tc.etcd.Name { + etcdInExisting = true + break + } + } + if !etcdInExisting { + err := fakeClient.Create(t.Context(), tc.etcd) + if err != nil { + t.Fatalf("Failed to create Etcd: %v", err) + } + } + + // Reconcile + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, + } + + result, err := reconciler.Reconcile(t.Context(), req) + if (err != nil) != tc.wantErr { + t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) + return + } + if tc.wantErr { + return + } + + // NOTE: Check for requeue delay when we need to support such setup. + _ = result + // // Check requeue + // if (result.RequeueAfter != 0) != tc.wantRequeue { + // t.Errorf("Reconcile() result.Requeue = %v, want %v", result.RequeueAfter, tc.wantRequeue) + // } + + // Run custom assertions if provided + if tc.assertFunc != nil { + tc.assertFunc(t, fakeClient, tc.etcd) + } + }) + } +} + +func TestEtcdReconciler_ReconcileNotFound(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Reconcile non-existent resource + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "nonexistent-etcd", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(t.Context(), req) + if err != nil { + t.Errorf("Reconcile() should not error on NotFound, got: %v", err) + } + if result.Requeue { + t.Errorf("Reconcile() should not requeue on NotFound") + } +} diff --git a/pkg/resource-handler/controller/etcd/etcd_env.go b/pkg/resource-handler/controller/etcd/etcd_env.go new file mode 100644 index 00000000..c8ce2db3 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_env.go @@ -0,0 +1,120 @@ +package etcd + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +// buildEtcdEnv constructs all environment variables for etcd clustering in +// StatefulSets. This combines pod identity, etcd config, and cluster peer +// discovery details. +func buildEtcdEnv(etcdName, namespace string, replicas int32, serviceName string) []corev1.EnvVar { + envVars := make([]corev1.EnvVar, 0) + + // Add pod identity variables from downward API + envVars = append(envVars, buildPodIdentityEnv()...) + + // Add etcd configuration variables + envVars = append(envVars, buildEtcdConfigEnv(etcdName, serviceName, namespace)...) + + // Add the initial cluster peer list + clusterPeerList := buildEtcdClusterPeerList(etcdName, serviceName, namespace, replicas) + envVars = append(envVars, corev1.EnvVar{ + Name: "ETCD_INITIAL_CLUSTER", + Value: clusterPeerList, + }) + + return envVars +} + +// buildPodIdentityEnv creates environment variables for pod name and namespace. +// These are required for etcd to construct its advertise URLs in StatefulSets, +// and this association of both Pod name and namespace are common. +// +// Ref: https://etcd.io/docs/latest/op-guide/clustering/ +func buildPodIdentityEnv() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + } +} + +// buildEtcdConfigEnv creates etcd configuration environment variables. +// These configure etcd's network endpoints and cluster formation. +// +// Ref: https://etcd.io/docs/latest/op-guide/configuration/ +func buildEtcdConfigEnv(etcdName, serviceName, namespace string) []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "ETCD_NAME", + Value: "$(POD_NAME)", + }, + { + Name: "ETCD_DATA_DIR", + Value: "/var/lib/etcd", + }, + { + Name: "ETCD_LISTEN_CLIENT_URLS", + Value: "http://0.0.0.0:2379", + }, + { + Name: "ETCD_LISTEN_PEER_URLS", + Value: "http://0.0.0.0:2380", + }, + { + Name: "ETCD_ADVERTISE_CLIENT_URLS", + Value: fmt.Sprintf("http://$(POD_NAME).%s.$(POD_NAMESPACE).svc.cluster.local:2379", serviceName), + }, + { + Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", + Value: fmt.Sprintf("http://$(POD_NAME).%s.$(POD_NAMESPACE).svc.cluster.local:2380", serviceName), + }, + { + Name: "ETCD_INITIAL_CLUSTER_STATE", + Value: "new", + }, + { + Name: "ETCD_INITIAL_CLUSTER_TOKEN", + Value: etcdName, + }, + } +} + +// buildEtcdClusterPeerList generates the initial cluster member list for +// bootstrap. This tells each etcd member about all other members during cluster +// formation. 
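+// The member names must match the StatefulSet pod names (<etcdName>-<ordinal>)
+// so that each pod can find its own entry via ETCD_NAME=$(POD_NAME).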
+// +// Format: member-0=http://member-0.service.ns.svc.cluster.local:2380,... +// +// Ref: https://etcd.io/docs/latest/op-guide/clustering/#static +func buildEtcdClusterPeerList(etcdName, serviceName, namespace string, replicas int32) string { + if replicas < 0 { + return "" + } + + peers := make([]string, 0, replicas) + for i := range replicas { + podName := fmt.Sprintf("%s-%d", etcdName, i) + peerURL := fmt.Sprintf("%s=http://%s.%s.%s.svc.cluster.local:2380", + podName, podName, serviceName, namespace) + peers = append(peers, peerURL) + } + + return strings.Join(peers, ",") +} diff --git a/pkg/resource-handler/controller/etcd/etcd_env_test.go b/pkg/resource-handler/controller/etcd/etcd_env_test.go new file mode 100644 index 00000000..38ce47ac --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_env_test.go @@ -0,0 +1,286 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" +) + +func TestBuildPodIdentityEnv(t *testing.T) { + got := buildPodIdentityEnv() + + want := []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("buildPodIdentityEnv() mismatch (-want +got):\n%s", diff) + } +} + +func TestBuildEtcdConfigEnv(t *testing.T) { + tests := map[string]struct { + etcdName string + serviceName string + namespace string + want []corev1.EnvVar + }{ + "basic configuration": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + want: []corev1.EnvVar{ + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-etcd"}, + }, + }, + "different namespace": { + etcdName: "test-etcd", + serviceName: "test-etcd-headless", + namespace: "production", + want: []corev1.EnvVar{ + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-etcd"}, + }, + }, + "long names": { + etcdName: "very-long-etcd-cluster-name", + serviceName: "very-long-etcd-cluster-name-headless", + namespace: "kube-system", + want: []corev1.EnvVar{ + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + 
{Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).very-long-etcd-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).very-long-etcd-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "very-long-etcd-cluster-name"}, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildEtcdConfigEnv(tc.etcdName, tc.serviceName, tc.namespace) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildEtcdConfigEnv() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildEtcdClusterPeerList(t *testing.T) { + tests := map[string]struct { + etcdName string + serviceName string + namespace string + replicas int32 + want string + }{ + "single replica": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: 1, + want: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380", + }, + "three replicas (typical HA)": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: 3, + want: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-1=http://my-etcd-1.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-2=http://my-etcd-2.my-etcd-headless.default.svc.cluster.local:2380", + }, + "five replicas": { + etcdName: "etcd-prod", + serviceName: "etcd-prod-headless", + namespace: "production", + replicas: 5, + want: "etcd-prod-0=http://etcd-prod-0.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-1=http://etcd-prod-1.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-2=http://etcd-prod-2.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-3=http://etcd-prod-3.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-4=http://etcd-prod-4.etcd-prod-headless.production.svc.cluster.local:2380", + }, + "zero replicas": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: 0, + want: "", + }, + "negative replicas": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: -1, + want: "", + }, + "different namespace": { + etcdName: "kube-etcd", + serviceName: "kube-etcd-headless", + namespace: "kube-system", + replicas: 3, + want: "kube-etcd-0=http://kube-etcd-0.kube-etcd-headless.kube-system.svc.cluster.local:2380,kube-etcd-1=http://kube-etcd-1.kube-etcd-headless.kube-system.svc.cluster.local:2380,kube-etcd-2=http://kube-etcd-2.kube-etcd-headless.kube-system.svc.cluster.local:2380", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildEtcdClusterPeerList(tc.etcdName, tc.serviceName, tc.namespace, tc.replicas) + if got != tc.want { + t.Errorf("buildEtcdClusterPeerList() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestBuildEtcdEnv(t *testing.T) { + tests := map[string]struct { + etcdName string + namespace string + replicas int32 + serviceName string + want []corev1.EnvVar + }{ + "complete environment with 3 replicas": { + etcdName: "my-etcd", + namespace: "default", + replicas: 3, + serviceName: "my-etcd-headless", + want: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: 
&corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER", Value: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-1=http://my-etcd-1.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-2=http://my-etcd-2.my-etcd-headless.default.svc.cluster.local:2380"}, + }, + }, + "single replica": { + etcdName: "test-etcd", + namespace: "test", + replicas: 1, + serviceName: "test-etcd-headless", + want: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + // Cluster setup won't happen in a single cluster, and these + // env variables are only used at startup. + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER", Value: "test-etcd-0=http://test-etcd-0.test-etcd-headless.test.svc.cluster.local:2380"}, + }, + }, + "zero replicas - no ETCD_INITIAL_CLUSTER": { + etcdName: "empty-etcd", + namespace: "default", + replicas: 0, + serviceName: "empty-etcd-headless", + want: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).empty-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).empty-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + // Cluster setup won't happen in a single cluster, and these + // env variables are only used at startup. In case of scaling up + // from zero replica, the updated env variable will be picked up + // correctly, and thus an empty variable like this will be OK. 
+ {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "empty-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER"}, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildEtcdEnv(tc.etcdName, tc.namespace, tc.replicas, tc.serviceName) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildEtcdEnv() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/etcd/ports.go b/pkg/resource-handler/controller/etcd/ports.go new file mode 100644 index 00000000..8d77e880 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/ports.go @@ -0,0 +1,94 @@ +package etcd + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + // ClientPort is the default port for etcd client connections. + ClientPort int32 = 2379 + + // PeerPort is the default port for etcd peer connections. + PeerPort int32 = 2380 +) + +// buildContainerPorts creates the port definitions for the etcd container. +// Uses default ports since EtcdSpec doesn't have port configuration yet. +func buildContainerPorts(etcd *multigresv1alpha1.Etcd) []corev1.ContainerPort { + clientPort := ClientPort + peerPort := PeerPort + + // TODO: When EtcdSpec has port fields, use them: + // if etcd.Spec.ClientPort != 0 { + // clientPort = etcd.Spec.ClientPort + // } + // if etcd.Spec.PeerPort != 0 { + // peerPort = etcd.Spec.PeerPort + // } + + return []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: clientPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: peerPort, + Protocol: corev1.ProtocolTCP, + }, + } +} + +// buildHeadlessServicePorts creates service ports for the headless service. +// Includes both client and peer ports for StatefulSet pod discovery. +func buildHeadlessServicePorts(etcd *multigresv1alpha1.Etcd) []corev1.ServicePort { + clientPort := ClientPort + peerPort := PeerPort + + // TODO: When EtcdSpec has port fields, use them: + // if etcd.Spec.ClientPort != 0 { + // clientPort = etcd.Spec.ClientPort + // } + // if etcd.Spec.PeerPort != 0 { + // peerPort = etcd.Spec.PeerPort + // } + + return []corev1.ServicePort{ + { + Name: "client", + Port: clientPort, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: peerPort, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + } +} + +// buildClientServicePorts creates service ports for the client service. +// Only includes the client port for external access. 
+func buildClientServicePorts(etcd *multigresv1alpha1.Etcd) []corev1.ServicePort { + clientPort := ClientPort + + // TODO: When EtcdSpec has clientPort field, use it: + // if etcd.Spec.ClientPort != 0 { + // clientPort = etcd.Spec.ClientPort + // } + + return []corev1.ServicePort{ + { + Name: "client", + Port: clientPort, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + } +} diff --git a/pkg/resource-handler/controller/etcd/ports_test.go b/pkg/resource-handler/controller/etcd/ports_test.go new file mode 100644 index 00000000..73475a2b --- /dev/null +++ b/pkg/resource-handler/controller/etcd/ports_test.go @@ -0,0 +1,124 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildContainerPorts(t *testing.T) { + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + want []corev1.ContainerPort + }{ + "default ports": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + want: []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: 2379, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: 2380, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildContainerPorts(tc.etcd) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildContainerPorts() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildHeadlessServicePorts(t *testing.T) { + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + want []corev1.ServicePort + }{ + "default ports": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + want: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildHeadlessServicePorts(tc.etcd) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildHeadlessServicePorts() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildClientServicePorts(t *testing.T) { + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + want []corev1.ServicePort + }{ + "default port": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + want: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildClientServicePorts(tc.etcd) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildClientServicePorts() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/etcd/service.go b/pkg/resource-handler/controller/etcd/service.go new file mode 100644 index 00000000..0486198f --- /dev/null +++ b/pkg/resource-handler/controller/etcd/service.go @@ -0,0 +1,64 @@ +package etcd 
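+
+// Illustrative usage sketch (hypothetical reconciler wiring, not part of this
+// change; r.Scheme and the apply step are assumed names):
+//
+//	headless, err := BuildHeadlessService(etcd, r.Scheme)
+//	if err != nil { /* handle error */ }
+//	clientSvc, err := BuildClientService(etcd, r.Scheme)
+//	// Both carry controller owner references; the caller creates or patches them.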
+ +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" +) + +// BuildHeadlessService creates a headless Service for the Etcd StatefulSet. +// Headless services are required for StatefulSet pod DNS records. +func BuildHeadlessService(etcd *multigresv1alpha1.Etcd, scheme *runtime.Scheme) (*corev1.Service, error) { + labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: etcd.Name + "-headless", + Namespace: etcd.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: labels, + Ports: buildHeadlessServicePorts(etcd), + PublishNotReadyAddresses: true, + }, + } + + if err := ctrl.SetControllerReference(etcd, svc, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return svc, nil +} + +// BuildClientService creates a client Service for external access to Etcd. +// This service load balances across all etcd members. +func BuildClientService(etcd *multigresv1alpha1.Etcd, scheme *runtime.Scheme) (*corev1.Service, error) { + labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: etcd.Name, + Namespace: etcd.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: labels, + Ports: buildClientServicePorts(etcd), + }, + } + + if err := ctrl.SetControllerReference(etcd, svc, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return svc, nil +} diff --git a/pkg/resource-handler/controller/etcd/service_test.go b/pkg/resource-handler/controller/etcd/service_test.go new file mode 100644 index 00000000..9a9ca6dd --- /dev/null +++ b/pkg/resource-handler/controller/etcd/service_test.go @@ -0,0 +1,311 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildHeadlessService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-headless", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: 
boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + "with cellName": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone1", + Namespace: "production", + UID: "zone1-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + CellName: "zone1", + }, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone1-headless", + Namespace: "production", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone1", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone1", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "etcd-zone1", + UID: "zone1-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone1", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone1", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildHeadlessService(tc.etcd, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildHeadlessService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildHeadlessService() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildClientService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": 
"multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + "with cellName": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone2", + Namespace: "production", + UID: "zone2-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + CellName: "zone2", + }, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone2", + Namespace: "production", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone2", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone2", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "etcd-zone2", + UID: "zone2-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone2", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone2", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildClientService(tc.etcd, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildClientService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildClientService() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/etcd/statefulset.go b/pkg/resource-handler/controller/etcd/statefulset.go new file mode 100644 index 00000000..281126a3 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/statefulset.go @@ -0,0 +1,133 @@ +package etcd + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/storage" +) + +const ( + // ComponentName is the component label value for etcd resources + ComponentName = "etcd" + + // DefaultReplicas is the default number of etcd replicas + 
DefaultReplicas int32 = 3 + + // DefaultImage is the default etcd container image + DefaultImage = "gcr.io/etcd-development/etcd:v3.5.9" + + // DefaultStorageSize is the default storage size for etcd data + DefaultStorageSize = "10Gi" + + // DataVolumeName is the name of the data volume + DataVolumeName = "data" + + // DataMountPath is the mount path for etcd data + DataMountPath = "/var/lib/etcd" +) + +// BuildStatefulSet creates a StatefulSet for the Etcd cluster. +// Returns a deterministic StatefulSet based on the Etcd spec. +func BuildStatefulSet(etcd *multigresv1alpha1.Etcd, scheme *runtime.Scheme) (*appsv1.StatefulSet, error) { + replicas := DefaultReplicas + // TODO: Debatable whether this defaulting makes sense. + if etcd.Spec.Replicas != nil { + replicas = *etcd.Spec.Replicas + } + + image := DefaultImage + if etcd.Spec.Image != "" { + image = etcd.Spec.Image + } + + headlessServiceName := etcd.Name + "-headless" + labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + podLabels := metadata.MergeLabels(labels, etcd.Spec.PodLabels) + + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: etcd.Name, + Namespace: etcd.Namespace, + Labels: labels, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: headlessServiceName, + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + Annotations: etcd.Spec.PodAnnotations, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: etcd.Spec.ServiceAccountName, + ImagePullSecrets: etcd.Spec.ImagePullSecrets, + Containers: []corev1.Container{ + { + Name: "etcd", + Image: image, + Resources: etcd.Spec.Resources, + Env: buildEtcdEnv(etcd.Name, etcd.Namespace, replicas, headlessServiceName), + Ports: buildContainerPorts(etcd), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + Affinity: etcd.Spec.Affinity, + Tolerations: etcd.Spec.Tolerations, + NodeSelector: etcd.Spec.NodeSelector, + TopologySpreadConstraints: etcd.Spec.TopologySpreadConstraints, + }, + }, + VolumeClaimTemplates: buildVolumeClaimTemplates(etcd), + }, + } + + if err := ctrl.SetControllerReference(etcd, sts, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return sts, nil +} + +// buildVolumeClaimTemplates creates the PVC templates for etcd data storage. +// Caller decides whether to use VolumeClaimTemplate or build from simple fields. 
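+//
+// Behaviour sketch (restating the logic below, for illustration):
+//
+//	Spec.VolumeClaimTemplate != nil -> used verbatim as the "data" PVC spec
+//	otherwise                       -> storage.BuildPVCTemplate(DataVolumeName,
+//	                                   Spec.StorageClassName, Spec.StorageSize),
+//	                                   with StorageSize defaulting to "10Gi"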
+func buildVolumeClaimTemplates(etcd *multigresv1alpha1.Etcd) []corev1.PersistentVolumeClaim { + if etcd.Spec.VolumeClaimTemplate != nil { + return []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: *etcd.Spec.VolumeClaimTemplate, + }, + } + } + + storageSize := DefaultStorageSize + if etcd.Spec.StorageSize != "" { + storageSize = etcd.Spec.StorageSize + } + + return []corev1.PersistentVolumeClaim{ + storage.BuildPVCTemplate(DataVolumeName, etcd.Spec.StorageClassName, storageSize), + } +} diff --git a/pkg/resource-handler/controller/etcd/statefulset_test.go b/pkg/resource-handler/controller/etcd/statefulset_test.go new file mode 100644 index 00000000..8e4d3a01 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/statefulset_test.go @@ -0,0 +1,483 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func int32Ptr(i int32) *int32 { + return &i +} + +func boolPtr(b bool) *bool { + return &b +} + +func stringPtr(s string) *string { + return &s +} + +func TestBuildStatefulSet(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + scheme *runtime.Scheme + want *appsv1.StatefulSet + wantErr bool + }{ + "minimal spec - all defaults": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: scheme, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-etcd-headless", + Replicas: int32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "etcd", + Image: DefaultImage, + Resources: 
corev1.ResourceRequirements{}, + Env: buildEtcdEnv("test-etcd", "default", 3, "test-etcd-headless"), + Ports: buildContainerPorts(nil), // Default + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(DefaultStorageSize), + }, + }, + }, + }, + }, + }, + }, + }, + "custom replicas and image": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-custom", + Namespace: "test", + UID: "custom-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(5), + Image: "quay.io/coreos/etcd:v3.5.15", + }, + }, + scheme: scheme, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-custom", + Namespace: "test", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-custom", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "etcd-custom", + UID: "custom-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "etcd-custom-headless", + Replicas: int32Ptr(5), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-custom", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-custom", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "etcd", + Image: "quay.io/coreos/etcd:v3.5.15", + Resources: corev1.ResourceRequirements{}, + Env: buildEtcdEnv("etcd-custom", "test", 5, "etcd-custom-headless"), + Ports: buildContainerPorts(nil), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(DefaultStorageSize), + }, + }, + }, + }, + }, + }, + }, + }, + "custom storage size": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + StorageSize: "20Gi", + }, + }, + scheme: scheme, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-etcd-headless", + Replicas: int32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "etcd", + Image: DefaultImage, + Resources: corev1.ResourceRequirements{}, + Env: buildEtcdEnv("test-etcd", "default", 3, "test-etcd-headless"), + Ports: buildContainerPorts(nil), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("20Gi"), + }, + }, + }, + }, + }, + }, + }, + }, + "custom VolumeClaimTemplate": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + VolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("50Gi"), + }, + }, + StorageClassName: stringPtr("fast-ssd"), + }, + }, + }, + scheme: scheme, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + 
"multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-etcd-headless", + Replicas: int32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "etcd", + Image: DefaultImage, + Resources: corev1.ResourceRequirements{}, + Env: buildEtcdEnv("test-etcd", "default", 3, "test-etcd-headless"), + Ports: buildContainerPorts(nil), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("50Gi"), + }, + }, + StorageClassName: stringPtr("fast-ssd"), + }, + }, + }, + }, + }, + }, + "scheme without Etcd type - should error": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: runtime.NewScheme(), // empty scheme without Etcd type + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildStatefulSet(tc.etcd, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildStatefulSet() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildStatefulSet() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/testutil/fake_client.go b/pkg/resource-handler/controller/testutil/fake_client.go new file mode 100644 index 00000000..ec5a76f4 --- /dev/null +++ b/pkg/resource-handler/controller/testutil/fake_client.go @@ -0,0 +1,255 @@ +// Package testutil provides testing utilities for controller tests. +package testutil + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// FailureConfig configures when the fake client should return errors. +// Each field is a function that receives the object/key and returns an error if the operation should fail. +type FailureConfig struct { + // OnGet is called before Get operations. 
Return non-nil to fail the operation. + OnGet func(key client.ObjectKey) error + + // OnList is called before List operations. Return non-nil to fail the operation. + OnList func(list client.ObjectList) error + + // OnCreate is called before Create operations. Return non-nil to fail the operation. + OnCreate func(obj client.Object) error + + // OnUpdate is called before Update operations. Return non-nil to fail the operation. + OnUpdate func(obj client.Object) error + + // OnPatch is called before Patch operations. Return non-nil to fail the operation. + OnPatch func(obj client.Object) error + + // OnDelete is called before Delete operations. Return non-nil to fail the operation. + OnDelete func(obj client.Object) error + + // OnDeleteAllOf is called before DeleteAllOf operations. Return non-nil to fail the operation. + OnDeleteAllOf func(obj client.Object) error + + // OnStatusUpdate is called before Status().Update() operations. Return non-nil to fail the operation. + OnStatusUpdate func(obj client.Object) error + + // OnStatusPatch is called before Status().Patch() operations. Return non-nil to fail the operation. + OnStatusPatch func(obj client.Object) error +} + +// fakeClientWithFailures wraps a real fake client and injects failures based on configuration. +type fakeClientWithFailures struct { + client.Client + config *FailureConfig +} + +// NewFakeClientWithFailures creates a fake client that can be configured to fail operations. +// This is useful for testing error handling paths in controllers. +func NewFakeClientWithFailures(baseClient client.Client, config *FailureConfig) client.Client { + if config == nil { + config = &FailureConfig{} + } + return &fakeClientWithFailures{ + Client: baseClient, + config: config, + } +} + +func (c *fakeClientWithFailures) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if c.config.OnGet != nil { + if err := c.config.OnGet(key); err != nil { + return err + } + } + return c.Client.Get(ctx, key, obj, opts...) +} + +func (c *fakeClientWithFailures) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if c.config.OnList != nil { + if err := c.config.OnList(list); err != nil { + return err + } + } + return c.Client.List(ctx, list, opts...) +} + +func (c *fakeClientWithFailures) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if c.config.OnCreate != nil { + if err := c.config.OnCreate(obj); err != nil { + return err + } + } + return c.Client.Create(ctx, obj, opts...) +} + +func (c *fakeClientWithFailures) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.config.OnUpdate != nil { + if err := c.config.OnUpdate(obj); err != nil { + return err + } + } + return c.Client.Update(ctx, obj, opts...) +} + +func (c *fakeClientWithFailures) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + if c.config.OnPatch != nil { + if err := c.config.OnPatch(obj); err != nil { + return err + } + } + return c.Client.Patch(ctx, obj, patch, opts...) +} + +func (c *fakeClientWithFailures) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if c.config.OnDelete != nil { + if err := c.config.OnDelete(obj); err != nil { + return err + } + } + return c.Client.Delete(ctx, obj, opts...) 
+} + +func (c *fakeClientWithFailures) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + if c.config.OnDeleteAllOf != nil { + if err := c.config.OnDeleteAllOf(obj); err != nil { + return err + } + } + return c.Client.DeleteAllOf(ctx, obj, opts...) +} + +func (c *fakeClientWithFailures) Status() client.StatusWriter { + return &statusWriterWithFailures{ + StatusWriter: c.Client.Status(), + config: c.config, + } +} + +type statusWriterWithFailures struct { + client.StatusWriter + config *FailureConfig +} + +func (s *statusWriterWithFailures) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + if s.config.OnStatusUpdate != nil { + if err := s.config.OnStatusUpdate(obj); err != nil { + return err + } + } + return s.StatusWriter.Update(ctx, obj, opts...) +} + +func (s *statusWriterWithFailures) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + if s.config.OnStatusPatch != nil { + if err := s.config.OnStatusPatch(obj); err != nil { + return err + } + } + return s.StatusWriter.Patch(ctx, obj, patch, opts...) +} + +// Helper functions for common failure scenarios + +// FailOnObjectName returns an error if the object name matches. +func FailOnObjectName(name string, err error) func(client.Object) error { + return func(obj client.Object) error { + accessor, metaErr := meta.Accessor(obj) + if metaErr != nil { + panic(fmt.Sprintf("meta.Accessor failed: %v", metaErr)) + } + if accessor.GetName() == name { + return err + } + return nil + } +} + +// FailOnKeyName returns an error if the key name matches. +func FailOnKeyName(name string, err error) func(client.ObjectKey) error { + return func(key client.ObjectKey) error { + if key.Name == name { + return err + } + return nil + } +} + +// FailOnNamespacedKeyName returns an error if both the key name and namespace match. +func FailOnNamespacedKeyName(name, namespace string, err error) func(client.ObjectKey) error { + return func(key client.ObjectKey) error { + if key.Name == name && key.Namespace == namespace { + return err + } + return nil + } +} + +// FailOnNamespace returns an error if the namespace matches. +func FailOnNamespace(namespace string, err error) func(client.Object) error { + return func(obj client.Object) error { + accessor, metaErr := meta.Accessor(obj) + if metaErr != nil { + panic(fmt.Sprintf("meta.Accessor failed: %v", metaErr)) + } + if accessor.GetNamespace() == namespace { + return err + } + return nil + } +} + +// AlwaysFail returns the given error for all operations. +func AlwaysFail(err error) func(any) error { + return func(interface{}) error { + return err + } +} + +// FailKeyAfterNCalls returns an ObjectKey failure function that fails after N successful calls. +// Use for OnGet. +func FailKeyAfterNCalls(n int, err error) func(client.ObjectKey) error { + count := 0 + return func(client.ObjectKey) error { + count++ + if count > n { + return err + } + return nil + } +} + +// FailObjAfterNCalls returns an Object failure function that fails after N successful calls. +// Use for OnCreate, OnUpdate, OnDelete, OnPatch, OnDeleteAllOf, OnStatusUpdate, OnStatusPatch. +func FailObjAfterNCalls(n int, err error) func(client.Object) error { + count := 0 + return func(client.Object) error { + count++ + if count > n { + return err + } + return nil + } +} + +// FailObjListAfterNCalls returns an ObjectList failure function that fails after N successful calls. +// Use for OnList. 
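+//
+// Illustrative wiring (hypothetical test snippet, not part of this change):
+//
+//	cfg := &FailureConfig{OnList: FailObjListAfterNCalls(2, ErrNetworkTimeout)}
+//	c := NewFakeClientWithFailures(fake.NewClientBuilder().WithScheme(scheme).Build(), cfg)
+//	// The first two List calls on c succeed; every later List returns ErrNetworkTimeout.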
+func FailObjListAfterNCalls(n int, err error) func(client.ObjectList) error { + count := 0 + return func(client.ObjectList) error { + count++ + if count > n { + return err + } + return nil + } +} + +// Common errors for testing +var ( + ErrInjected = fmt.Errorf("injected test error") + ErrNetworkTimeout = fmt.Errorf("network timeout") + ErrPermissionError = fmt.Errorf("permission denied") +) diff --git a/pkg/resource-handler/controller/testutil/fake_client_test.go b/pkg/resource-handler/controller/testutil/fake_client_test.go new file mode 100644 index 00000000..97867777 --- /dev/null +++ b/pkg/resource-handler/controller/testutil/fake_client_test.go @@ -0,0 +1,722 @@ +package testutil + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestFakeClientWithFailures_Get(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + key client.ObjectKey + wantErr bool + }{ + "no failure - get succeeds": { + config: nil, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: false, + }, + "fail on specific name": { + config: &FailureConfig{ + OnGet: FailOnKeyName("test-pod", ErrInjected), + }, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: true, + }, + "no failure on different name": { + config: &FailureConfig{ + OnGet: FailOnKeyName("other-pod", ErrInjected), + }, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: false, + }, + "always fail": { + config: &FailureConfig{ + OnGet: func(key client.ObjectKey) error { + return ErrInjected + }, + }, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). 
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + result := &corev1.Pod{} + err := fakeClient.Get(context.Background(), tc.key, result) + + if (err != nil) != tc.wantErr { + t.Errorf("Get() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Create(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + config *FailureConfig + obj *corev1.Pod + wantErr bool + }{ + "no failure - create succeeds": { + config: nil, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-pod", + Namespace: "default", + }, + }, + wantErr: false, + }, + "fail on specific object name": { + config: &FailureConfig{ + OnCreate: FailOnObjectName("new-pod", ErrPermissionError), + }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-pod", + Namespace: "default", + }, + }, + wantErr: true, + }, + "no failure on different object name": { + config: &FailureConfig{ + OnCreate: FailOnObjectName("other-pod", ErrPermissionError), + }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-pod", + Namespace: "default", + }, + }, + wantErr: false, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Create(context.Background(), tc.obj) + + if (err != nil) != tc.wantErr { + t.Errorf("Create() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Update(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - update succeeds": { + config: nil, + wantErr: false, + }, + "fail on update": { + config: &FailureConfig{ + OnUpdate: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Update(context.Background(), pod) + + if (err != nil) != tc.wantErr { + t.Errorf("Update() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Delete(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - delete succeeds": { + config: nil, + wantErr: false, + }, + "fail on delete": { + config: &FailureConfig{ + OnDelete: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + "fail on namespace": { + config: &FailureConfig{ + OnDelete: FailOnNamespace("default", ErrPermissionError), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod.DeepCopy()). 
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Delete(context.Background(), pod) + + if (err != nil) != tc.wantErr { + t.Errorf("Delete() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_StatusUpdate(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - status update succeeds": { + config: nil, + wantErr: false, + }, + "fail on status update": { + config: &FailureConfig{ + OnStatusUpdate: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + WithStatusSubresource(&corev1.Pod{}). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Status().Update(context.Background(), pod) + + if (err != nil) != tc.wantErr { + t.Errorf("Status().Update() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_List(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - list succeeds": { + config: nil, + wantErr: false, + }, + "fail on list": { + config: &FailureConfig{ + OnList: func(list client.ObjectList) error { + return ErrInjected + }, + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + podList := &corev1.PodList{} + err := fakeClient.List(context.Background(), podList) + + if (err != nil) != tc.wantErr { + t.Errorf("List() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Patch(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - patch succeeds": { + config: nil, + wantErr: false, + }, + "fail on patch": { + config: &FailureConfig{ + OnPatch: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod.DeepCopy()). 
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + patch := client.MergeFrom(pod.DeepCopy()) + err := fakeClient.Patch(context.Background(), pod, patch) + + if (err != nil) != tc.wantErr { + t.Errorf("Patch() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_DeleteAllOf(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - deleteAllOf succeeds": { + config: nil, + wantErr: false, + }, + "fail on deleteAllOf": { + config: &FailureConfig{ + OnDeleteAllOf: func(obj client.Object) error { + return ErrInjected + }, + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.DeleteAllOf(context.Background(), &corev1.Pod{}, client.InNamespace("default")) + + if (err != nil) != tc.wantErr { + t.Errorf("DeleteAllOf() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_StatusPatch(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - status patch succeeds": { + config: nil, + wantErr: false, + }, + "fail on status patch": { + config: &FailureConfig{ + OnStatusPatch: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod.DeepCopy()). + WithStatusSubresource(&corev1.Pod{}). 
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + patch := client.MergeFrom(pod.DeepCopy()) + err := fakeClient.Status().Patch(context.Background(), pod, patch) + + if (err != nil) != tc.wantErr { + t.Errorf("Status().Patch() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestHelperFunctions(t *testing.T) { + t.Parallel() + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + t.Run("FailOnObjectName - matching name", func(t *testing.T) { + t.Parallel() + + fn := FailOnObjectName("test-pod", ErrInjected) + err := fn(pod) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnObjectName - different name", func(t *testing.T) { + t.Parallel() + + fn := FailOnObjectName("other-pod", ErrInjected) + err := fn(pod) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailOnKeyName - matching name", func(t *testing.T) { + t.Parallel() + + fn := FailOnKeyName("test-pod", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnKeyName - different name", func(t *testing.T) { + t.Parallel() + + fn := FailOnKeyName("other-pod", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailOnNamespacedKeyName - matching name and namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnNamespacedKeyName - matching name but different namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "kube-system"}) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailOnNamespacedKeyName - different name but matching namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) + err := fn(client.ObjectKey{Name: "other-pod", Namespace: "default"}) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailOnNamespace - matching namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespace("default", ErrInjected) + err := fn(pod) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnNamespace - different namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespace("other-ns", ErrInjected) + err := fn(pod) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailKeyAfterNCalls", func(t *testing.T) { + t.Parallel() + + fn := FailKeyAfterNCalls(2, ErrInjected) + + // First call - should succeed + if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != nil { + t.Errorf("Call 1: expected no error, got %v", err) + } + + // Second call - should succeed + if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != nil { + t.Errorf("Call 2: expected no error, got %v", err) + } + + // Third call - should fail + if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != ErrInjected { + t.Errorf("Call 3: 
expected ErrInjected, got %v", err)
+		}
+	})
+
+	t.Run("FailObjAfterNCalls", func(t *testing.T) {
+		t.Parallel()
+
+		fn := FailObjAfterNCalls(1, ErrPermissionError)
+
+		// First call - should succeed
+		if err := fn(pod); err != nil {
+			t.Errorf("Call 1: expected no error, got %v", err)
+		}
+
+		// Second call - should fail
+		if err := fn(pod); err != ErrPermissionError {
+			t.Errorf("Call 2: expected ErrPermissionError, got %v", err)
+		}
+	})
+
+	t.Run("FailObjListAfterNCalls", func(t *testing.T) {
+		t.Parallel()
+
+		fn := FailObjListAfterNCalls(1, ErrNetworkTimeout)
+		podList := &corev1.PodList{}
+
+		// First call - should succeed
+		if err := fn(podList); err != nil {
+			t.Errorf("Call 1: expected no error, got %v", err)
+		}
+
+		// Second call - should fail
+		if err := fn(podList); err != ErrNetworkTimeout {
+			t.Errorf("Call 2: expected ErrNetworkTimeout, got %v", err)
+		}
+	})
+
+	t.Run("AlwaysFail with object", func(t *testing.T) {
+		t.Parallel()
+
+		fn := AlwaysFail(ErrInjected)
+		err := fn(pod)
+		if err != ErrInjected {
+			t.Errorf("Expected ErrInjected, got %v", err)
+		}
+	})
+
+	t.Run("AlwaysFail with key", func(t *testing.T) {
+		t.Parallel()
+
+		fn := AlwaysFail(ErrNetworkTimeout)
+		err := fn(client.ObjectKey{Name: "test", Namespace: "default"})
+		if err != ErrNetworkTimeout {
+			t.Errorf("Expected ErrNetworkTimeout, got %v", err)
+		}
+	})
+}
+
+func TestHelperFunctions_Panic(t *testing.T) {
+	t.Parallel()
+
+	t.Run("FailOnObjectName - panics on nil object", func(t *testing.T) {
+		t.Parallel()
+
+		defer func() {
+			if r := recover(); r == nil {
+				t.Errorf("Expected panic when meta.Accessor fails on nil")
+			}
+		}()
+
+		fn := FailOnObjectName("test", ErrInjected)
+		_ = fn(nil) // Should panic
+	})
+
+	t.Run("FailOnNamespace - panics on nil object", func(t *testing.T) {
+		t.Parallel()
+
+		defer func() {
+			if r := recover(); r == nil {
+				t.Errorf("Expected panic when meta.Accessor fails on nil")
+			}
+		}()
+
+		fn := FailOnNamespace("default", ErrInjected)
+		_ = fn(nil) // Should panic
+	})
+}
diff --git a/pkg/resource-handler/go.mod b/pkg/resource-handler/go.mod
index 012c27ad..34835b4f 100644
--- a/pkg/resource-handler/go.mod
+++ b/pkg/resource-handler/go.mod
@@ -4,26 +4,63 @@ go 1.25.0
 
 require (
 	github.com/google/go-cmp v0.7.0
+	github.com/numtide/multigres-operator/api v0.0.0-20251010210109-0981dc938d36
 	k8s.io/api v0.34.1
+	k8s.io/apimachinery v0.34.1
+	sigs.k8s.io/controller-runtime v0.22.3
 )
 
 require (
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
 	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/btree v1.1.3 // indirect
+	github.com/google/gnostic-models v0.7.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/kr/text v0.2.0 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.22.0 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.62.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/net v0.38.0 // indirect
+	golang.org/x/oauth2 v0.27.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
 	golang.org/x/text v0.23.0 // indirect
+	golang.org/x/time v0.9.0 // indirect
+	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	k8s.io/apimachinery v0.34.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/apiextensions-apiserver v0.34.1 // indirect
+	k8s.io/client-go v0.34.1 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
 	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+	sigs.k8s.io/yaml v1.6.0 // indirect
 )
diff --git a/pkg/resource-handler/go.sum b/pkg/resource-handler/go.sum
index df4088f6..4ee119d3 100644
--- a/pkg/resource-handler/go.sum
+++ b/pkg/resource-handler/go.sum
@@ -1,46 +1,124 @@
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
 github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
 github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/numtide/multigres-operator/api v0.0.0-20251010210109-0981dc938d36 h1:OJyNexfsX8SjGCG2JJBzNT8zThATaiu/MClD9TvCCdw=
+github.com/numtide/multigres-operator/api v0.0.0-20251010210109-0981dc938d36/go.mod h1:zvjoyJ/6V8IOFLCcsJ1PwixfYPfpgtHUrPd9+N0/nI8=
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
+github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
 go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -52,39 +130,66 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
 golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
 golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
 k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
+k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
+k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
 k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
 k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
+k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y=
+sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=