Commit db1400e: feedback
jmdeal committed Nov 23, 2024
1 parent ad7c521 commit db1400e
Showing 37 changed files with 113 additions and 186 deletions.

4 changes: 2 additions & 2 deletions pkg/apis/v1/nodepool_validation_cel_test.go

@@ -623,12 +623,12 @@ var _ = Describe("CEL/Validation", func() {
 	Context("NodeClassRef", func() {
 		It("should fail to mutate group", func() {
 			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
-			nodePool.Spec.Template.Spec.NodeClassRef.Group = "karpenter.k8s.aws"
+			nodePool.Spec.Template.Spec.NodeClassRef.Group = "karpenter.test.mutated.sh"
 			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
 		})
 		It("should fail to mutate kind", func() {
 			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
-			nodePool.Spec.Template.Spec.NodeClassRef.Group = "EC2NodeClass"
+			nodePool.Spec.Template.Spec.NodeClassRef.Group = "TestNodeClass2"
			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
 		})
 		It("should fail if group is unset", func() {

2 changes: 1 addition & 1 deletion pkg/controllers/disruption/controller.go

@@ -277,7 +277,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) {

 // logInvalidBudgets will log if there are any invalid schedules detected
 func (c *Controller) logInvalidBudgets(ctx context.Context) {
-	nps, err := nodepool.List(ctx, c.kubeClient, nodepool.WithManagedFilter(c.cloudProvider))
+	nps, err := nodepool.ListManaged(ctx, c.kubeClient, c.cloudProvider)
 	if err != nil {
 		log.FromContext(ctx).Error(err, "failed listing nodepools")
 		return

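Aside: the change above is the pattern this commit applies everywhere, replacing the option-based nodepool.List(ctx, c.kubeClient, nodepool.WithManagedFilter(c.cloudProvider)) with a dedicated nodepool.ListManaged(ctx, c.kubeClient, c.cloudProvider). The helper's implementation is not part of this diff; the following is a minimal sketch of the shape it presumably takes, with the IsManaged predicate assumed rather than taken from upstream.

// Hypothetical sketch; the real helper lives in sigs.k8s.io/karpenter/pkg/utils/nodepool
// and may differ in detail.
package nodepool

import (
	"context"

	"github.com/samber/lo"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
	"sigs.k8s.io/karpenter/pkg/cloudprovider"
)

// ListManaged lists NodePools and keeps only those whose NodeClassRef points at a
// NodeClass kind supported by this CloudProvider instance.
func ListManaged(ctx context.Context, kubeClient client.Client, cp cloudprovider.CloudProvider, opts ...client.ListOption) ([]*v1.NodePool, error) {
	list := &v1.NodePoolList{}
	if err := kubeClient.List(ctx, list, opts...); err != nil {
		return nil, err
	}
	return lo.Filter(lo.ToSlicePtr(list.Items), func(np *v1.NodePool, _ int) bool {
		return IsManaged(np, cp) // assumed predicate on np.Spec.Template.Spec.NodeClassRef
	}), nil
}
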
4 changes: 2 additions & 2 deletions pkg/controllers/disruption/helpers.go

@@ -163,7 +163,7 @@ func GetCandidates(ctx context.Context, cluster *state.Cluster, kubeClient clien
 // BuildNodePoolMap builds a provName -> nodePool map and a provName -> instanceName -> instance type map
 func BuildNodePoolMap(ctx context.Context, kubeClient client.Client, cloudProvider cloudprovider.CloudProvider) (map[string]*v1.NodePool, map[string]map[string]*cloudprovider.InstanceType, error) {
 	nodePoolMap := map[string]*v1.NodePool{}
-	nodePools, err := nodepoolutils.List(ctx, kubeClient, nodepoolutils.WithManagedFilter(cloudProvider))
+	nodePools, err := nodepoolutils.ListManaged(ctx, kubeClient, cloudProvider)
 	if err != nil {
 		return nil, nil, fmt.Errorf("listing node pools, %w", err)
 	}
@@ -227,7 +227,7 @@ func BuildDisruptionBudgetMapping(ctx context.Context, cluster *state.Cluster, c
 			disrupting[nodePool]++
 		}
 	}
-	nodePools, err := nodepoolutils.List(ctx, kubeClient, nodepoolutils.WithManagedFilter(cloudProvider))
+	nodePools, err := nodepoolutils.ListManaged(ctx, kubeClient, cloudProvider)
 	if err != nil {
 		return disruptionBudgetMapping, fmt.Errorf("listing node pools, %w", err)
 	}

4 changes: 2 additions & 2 deletions pkg/controllers/metrics/nodepool/suite_test.go

@@ -86,8 +86,8 @@ var _ = Describe("Metrics", func() {
 			nodePool.Spec.Limits = limits
 			if !isNodePoolManaged {
 				nodePool.Spec.Template.Spec.NodeClassRef = &v1.NodeClassReference{
-					Group: "karpenter.k8s.aws",
-					Kind:  "EC2NodeClass",
+					Group: "karpenter.test.sh",
+					Kind:  "UnmanagedNodeClass",
 					Name:  "default",
 				}
 			}

7 changes: 3 additions & 4 deletions pkg/controllers/node/termination/controller.go

@@ -39,7 +39,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

-	nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
 	"sigs.k8s.io/karpenter/pkg/utils/pretty"

 	v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
@@ -93,7 +92,7 @@ func (c *Controller) finalize(ctx context.Context, node *corev1.Node) (reconcile
 		return reconcile.Result{}, nil
 	}

-	nodeClaims, err := nodeutils.GetNodeClaims(ctx, node, c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider))
+	nodeClaims, err := nodeutils.GetNodeClaims(ctx, c.kubeClient, node)
 	if err != nil {
 		return reconcile.Result{}, fmt.Errorf("listing nodeclaims, %w", err)
 	}
@@ -149,7 +148,7 @@ func (c *Controller) finalize(ctx context.Context, node *corev1.Node) (reconcile
 			return reconcile.Result{RequeueAfter: 1 * time.Second}, nil
 		}
 	}
-	nodeClaims, err = nodeutils.GetNodeClaims(ctx, node, c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider))
+	nodeClaims, err = nodeutils.GetNodeClaims(ctx, c.kubeClient, node)
 	if err != nil {
 		return reconcile.Result{}, fmt.Errorf("deleting nodeclaims, %w", err)
 	}
@@ -289,7 +288,7 @@ func (c *Controller) nodeTerminationTime(node *corev1.Node, nodeClaims ...*v1.No
 func (c *Controller) Register(_ context.Context, m manager.Manager) error {
 	return controllerruntime.NewControllerManagedBy(m).
 		Named("node.termination").
-		For(&corev1.Node{}, builder.WithPredicates(nodeutils.IsManagedPredicates(c.cloudProvider))).
+		For(&corev1.Node{}, builder.WithPredicates(nodeutils.IsManagedPredicateFuncs(c.cloudProvider))).
 		WithOptions(
 			controller.Options{
 				RateLimiter: workqueue.NewTypedMaxOfRateLimiter[reconcile.Request](

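Aside: the rename from IsManagedPredicates to IsManagedPredicateFuncs suggests the helper returns a controller-runtime predicate.Funcs for use with builder.WithPredicates, as in the Register function above. A sketch of what the Node variant could look like, assuming an IsManaged helper that checks the node's NodeClass ownership labels:

// Hypothetical sketch; the real helper lives in sigs.k8s.io/karpenter/pkg/utils/node
// and may differ in detail.
package node

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/predicate"

	"sigs.k8s.io/karpenter/pkg/cloudprovider"
)

// IsManagedPredicateFuncs restricts watch events to Nodes managed by this Karpenter
// instance, so unmanaged Nodes never enter the reconcile queue.
func IsManagedPredicateFuncs(cp cloudprovider.CloudProvider) predicate.Funcs {
	return predicate.NewPredicateFuncs(func(o client.Object) bool {
		node, ok := o.(*corev1.Node)
		return ok && IsManaged(node, cp) // assumed helper keyed off the node's NodeClass label
	})
}
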
2 changes: 1 addition & 1 deletion pkg/controllers/node/termination/suite_test.go

@@ -114,7 +114,7 @@ var _ = Describe("Termination", func() {
 	})
 	It("should ignore nodes not managed by this Karpenter instance", func() {
 		delete(node.Labels, "karpenter.test.sh/testnodeclass")
-		node.Labels = lo.Assign(node.Labels, map[string]string{"karpenter.k8s.aws/ec2nodeclass": "default"})
+		node.Labels = lo.Assign(node.Labels, map[string]string{"karpenter.test.sh/unmanagednodeclass": "default"})
 		ExpectApplied(ctx, env.Client, node)
 		Expect(env.Client.Delete(ctx, node)).To(Succeed())
 		node = ExpectNodeExists(ctx, env.Client, node.Name)

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/consistency/controller.go

@@ -147,10 +147,10 @@ func (c *Controller) checkConsistency(ctx context.Context, nodeClaim *v1.NodeCla
 func (c *Controller) Register(_ context.Context, m manager.Manager) error {
 	return controllerruntime.NewControllerManagedBy(m).
 		Named("nodeclaim.consistency").
-		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutil.IsManagedPredicates(c.cloudProvider))).
+		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutil.IsManagedPredicateFuncs(c.cloudProvider))).
 		Watches(
 			&corev1.Node{},
-			nodeclaimutil.NodeEventHandler(c.kubeClient, nodeclaimutil.WithManagedFilter(c.cloudProvider)),
+			nodeclaimutil.NodeEventHandler(c.kubeClient, c.cloudProvider),
 		).
 		WithOptions(controller.Options{MaxConcurrentReconciles: 10}).
 		Complete(reconcile.AsReconciler(m.GetClient(), c))

8 changes: 4 additions & 4 deletions pkg/controllers/nodeclaim/consistency/suite_test.go

@@ -112,8 +112,8 @@ var _ = Describe("NodeClaimController", func() {
 			nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
 				Spec: v1.NodeClaimSpec{
 					NodeClassRef: &v1.NodeClassReference{
-						Group: "karpenter.k8s.aws",
-						Kind:  "EC2NodeClass",
+						Group: "karpenter.test.sh",
+						Kind:  "UnmanagedNodeClass",
 						Name:  "default",
 					},
 				},
@@ -178,8 +178,8 @@ var _ = Describe("NodeClaimController", func() {
 			nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
 				Spec: v1.NodeClaimSpec{
 					NodeClassRef: &v1.NodeClassReference{
-						Group: "karpenter.k8s.aws",
-						Kind:  "EC2NodeClass",
+						Group: "karpenter.test.sh",
+						Kind:  "UnmanagedNodeClass",
 						Name:  "default",
 					},
 				},

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/disruption/consolidation_test.go

@@ -62,8 +62,8 @@ var _ = Describe("Underutilized", func() {
 			},
 			Spec: v1.NodeClaimSpec{
 				NodeClassRef: &v1.NodeClassReference{
-					Group: "karpenter.k8s.aws",
-					Kind:  "EC2NodeClass",
+					Group: "karpenter.test.sh",
+					Kind:  "UnmanagedNodeClass",
 					Name:  "default",
 				},
 			},

22 changes: 7 additions & 15 deletions pkg/controllers/nodeclaim/disruption/controller.go

@@ -35,7 +35,7 @@ import (
 	v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
 	"sigs.k8s.io/karpenter/pkg/cloudprovider"
 	"sigs.k8s.io/karpenter/pkg/operator/injection"
-	nodeclaimutil "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
+	nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
 	"sigs.k8s.io/karpenter/pkg/utils/result"
 )

@@ -68,7 +68,7 @@ func NewController(clk clock.Clock, kubeClient client.Client, cloudProvider clou
 func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (reconcile.Result, error) {
 	ctx = injection.WithControllerName(ctx, "nodeclaim.disruption")

-	if !nodeclaimutil.IsManaged(nodeClaim, c.cloudProvider) {
+	if !nodeclaimutils.IsManaged(nodeClaim, c.cloudProvider) {
 		return reconcile.Result{}, nil
 	}
 	if !nodeClaim.DeletionTimestamp.IsZero() {
@@ -115,21 +115,13 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (re
 func (c *Controller) Register(_ context.Context, m manager.Manager) error {
 	b := controllerruntime.NewControllerManagedBy(m).
 		Named("nodeclaim.disruption").
-		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutil.IsManagedPredicates(c.cloudProvider))).
+		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicateFuncs(c.cloudProvider))).
 		WithOptions(controller.Options{MaxConcurrentReconciles: 10}).
-		// Note: We don't use the ManagedFilter (NodeClaim) for NodePool updates because drift should be captured when
-		// updating a NodePool's NodeClassRef to an unsupported NodeClass. However, this is currently unsupported
-		// (enforced via CEL validation on the NodeClassRef).
-		Watches(
-			&v1.NodePool{},
-			nodeclaimutil.NodePoolEventHandler(c.kubeClient),
-		).
-		Watches(
-			&corev1.Pod{},
-			nodeclaimutil.PodEventHandler(c.kubeClient, nodeclaimutil.WithManagedFilter(c.cloudProvider)),
-		)
+		Watches(&v1.NodePool{}, nodeclaimutils.NodePoolEventHandler(c.kubeClient, c.cloudProvider)).
+		Watches(&corev1.Pod{}, nodeclaimutils.PodEventHandler(c.kubeClient, c.cloudProvider))

 	for _, nodeClass := range c.cloudProvider.GetSupportedNodeClasses() {
-		b.Watches(nodeClass, nodeclaimutil.NodeClassEventHandler(c.kubeClient))
+		b.Watches(nodeClass, nodeclaimutils.NodeClassEventHandler(c.kubeClient))
 	}
 	return b.Complete(reconcile.AsReconciler(m.GetClient(), c))
 }

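Aside: beyond the alias rename, the Register change above folds managed filtering into the event handlers themselves, since NodePoolEventHandler and PodEventHandler now receive the cloud provider directly, and it drops the old caveat comment about NodePool updates. A sketch of the reworked NodePool handler under those assumptions (ListManaged and ForNodePool appear elsewhere in this commit; their use here is assumed):

// Hypothetical sketch; the real handler lives in sigs.k8s.io/karpenter/pkg/utils/nodeclaim
// and may differ in detail.
package nodeclaim

import (
	"context"

	"github.com/samber/lo"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
	"sigs.k8s.io/karpenter/pkg/cloudprovider"
)

// NodePoolEventHandler requeues every managed NodeClaim owned by a NodePool whenever
// that NodePool changes, so only claims this instance manages are reconciled.
func NodePoolEventHandler(kubeClient client.Client, cp cloudprovider.CloudProvider) handler.EventHandler {
	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request {
		nodeClaims, err := ListManaged(ctx, kubeClient, cp, ForNodePool(o.GetName()))
		if err != nil {
			return nil
		}
		return lo.Map(nodeClaims, func(nc *v1.NodeClaim, _ int) reconcile.Request {
			return reconcile.Request{NamespacedName: client.ObjectKeyFromObject(nc)}
		})
	})
}
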
4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/disruption/drift_test.go

@@ -64,8 +64,8 @@ var _ = Describe("Drift", func() {
 			cp.Drifted = "drifted"
 			if !isNodeClaimManaged {
 				nodeClaim.Spec.NodeClassRef = &v1.NodeClassReference{
-					Group: "karpenter.k8s.aws",
-					Kind:  "EC2NodeClass",
+					Group: "karpenter.test.sh",
+					Kind:  "UnmanagedNodeClass",
 					Name:  "default",
 				}
 			}

2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/expiration/controller.go

@@ -89,6 +89,6 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (re
 func (c *Controller) Register(_ context.Context, m manager.Manager) error {
 	return controllerruntime.NewControllerManagedBy(m).
 		Named("nodeclaim.expiration").
-		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicates(c.cloudProvider))).
+		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicateFuncs(c.cloudProvider))).
 		Complete(reconcile.AsReconciler(m.GetClient(), c))
 }

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/expiration/suite_test.go

@@ -124,8 +124,8 @@ var _ = Describe("Expiration", func() {
 			nodeClaim.Spec.ExpireAfter = v1.MustParseNillableDuration("30s")
 			if !isNodeClaimManaged {
 				nodeClaim.Spec.NodeClassRef = &v1.NodeClassReference{
-					Group: "karpenter.k8s.aws",
-					Kind:  "EC2NodeClass",
+					Group: "karpenter.test.sh",
+					Kind:  "UnmanagedNodeClass",
 					Name:  "default",
 				}
 			}

2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/garbagecollection/controller.go

@@ -59,7 +59,7 @@ func NewController(c clock.Clock, kubeClient client.Client, cloudProvider cloudp
 func (c *Controller) Reconcile(ctx context.Context) (reconcile.Result, error) {
 	ctx = injection.WithControllerName(ctx, "nodeclaim.garbagecollection")

-	nodeClaims, err := nodeclaimutils.List(ctx, c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider))
+	nodeClaims, err := nodeclaimutils.ListManaged(ctx, c.kubeClient, c.cloudProvider)
 	if err != nil {
 		return reconcile.Result{}, err
 	}

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/controller.go

@@ -87,10 +87,10 @@ func NewController(clk clock.Clock, kubeClient client.Client, cloudProvider clou
 func (c *Controller) Register(_ context.Context, m manager.Manager) error {
 	return controllerruntime.NewControllerManagedBy(m).
 		Named(c.Name()).
-		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicates(c.cloudProvider))).
+		For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicateFuncs(c.cloudProvider))).
 		Watches(
 			&corev1.Node{},
-			nodeclaimutils.NodeEventHandler(c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider)),
+			nodeclaimutils.NodeEventHandler(c.kubeClient, c.cloudProvider),
 		).
 		WithOptions(controller.Options{
 			RateLimiter: workqueue.NewTypedMaxOfRateLimiter[reconcile.Request](

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/initialization_test.go

@@ -59,8 +59,8 @@ var _ = Describe("Initialization", func() {
 			nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
 				Spec: v1.NodeClaimSpec{
 					NodeClassRef: &v1.NodeClassReference{
-						Group: "karpenter.k8s.aws",
-						Kind:  "EC2NodeClass",
+						Group: "karpenter.test.sh",
+						Kind:  "UnmanagedNodeClass",
 						Name:  "default",
 					},
 				},

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/launch_test.go

@@ -49,8 +49,8 @@ var _ = Describe("Launch", func() {
 			nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
 				Spec: v1.NodeClaimSpec{
 					NodeClassRef: &v1.NodeClassReference{
-						Group: "karpenter.k8s.aws",
-						Kind:  "EC2NodeClass",
+						Group: "karpenter.test.sh",
+						Kind:  "UnmanagedNodeClass",
 						Name:  "default",
 					},
 				},

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/liveness_test.go

@@ -60,8 +60,8 @@ var _ = Describe("Liveness", func() {
 			nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
 				Spec: v1.NodeClaimSpec{
 					NodeClassRef: &v1.NodeClassReference{
-						Group: "karpenter.k8s.aws",
-						Kind:  "EC2NodeClass",
+						Group: "karpenter.test.sh",
+						Kind:  "UnmanagedNodeClass",
 						Name:  "default",
 					},
 				},

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/registration_test.go

@@ -46,8 +46,8 @@ var _ = Describe("Registration", func() {
 			nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
 				Spec: v1.NodeClaimSpec{
 					NodeClassRef: &v1.NodeClassReference{
-						Group: "karpenter.k8s.aws",
-						Kind:  "EC2NodeClass",
+						Group: "karpenter.test.sh",
+						Kind:  "UnmanagedNodeClass",
 						Name:  "default",
 					},
 				},

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/suite_test.go

@@ -104,8 +104,8 @@ var _ = Describe("Finalizer", func() {
 			},
 			Spec: v1.NodeClaimSpec{
 				NodeClassRef: &v1.NodeClassReference{
-					Group: "karpenter.k8s.aws",
-					Kind:  "EC2NodeClass",
+					Group: "karpenter.test.sh",
+					Kind:  "UnmanagedNodeClass",
 					Name:  "default",
 				},
 			},

4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/termination_test.go

@@ -72,8 +72,8 @@ var _ = Describe("Termination", func() {
 		func(isNodeClaimManaged bool) {
 			if !isNodeClaimManaged {
 				nodeClaim.Spec.NodeClassRef = &v1.NodeClassReference{
-					Group: "karpenter.k8s.aws",
-					Kind:  "EC2NodeClass",
+					Group: "karpenter.test.sh",
+					Kind:  "UnmanagedNodeClass",
 					Name:  "default",
 				}
 			}

8 changes: 5 additions & 3 deletions pkg/controllers/nodeclaim/podevents/controller.go

@@ -34,7 +34,7 @@ import (

 	"sigs.k8s.io/karpenter/pkg/cloudprovider"
 	nodeutils "sigs.k8s.io/karpenter/pkg/utils/node"
-	"sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
+	nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
 	podutils "sigs.k8s.io/karpenter/pkg/utils/pod"
 )

@@ -71,13 +71,15 @@ func (c *Controller) Reconcile(ctx context.Context, pod *corev1.Pod) (reconcile.
 	if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: pod.Spec.NodeName}, node); err != nil {
 		return reconcile.Result{}, client.IgnoreNotFound(fmt.Errorf("getting node, %w", err))
 	}

 	// If there's no associated node claim, it's not a karpenter owned node.
-	nc, err := nodeutils.NodeClaimForNode(ctx, c.kubeClient, node, nodeclaim.WithManagedFilter(c.cloudProvider))
+	nc, err := nodeutils.NodeClaimForNode(ctx, c.kubeClient, node)
 	if err != nil {
 		// if the nodeclaim doesn't exist, or has duplicates, ignore.
 		return reconcile.Result{}, nodeutils.IgnoreDuplicateNodeClaimError(nodeutils.IgnoreNodeClaimNotFoundError(fmt.Errorf("getting nodeclaims for node, %w", err)))
 	}
+	if !nodeclaimutils.IsManaged(nc, c.cloudProvider) {
+		return reconcile.Result{}, nil
+	}

 	stored := nc.DeepCopy()
 	// If we've set the lastPodEvent before and it hasn't been before the timeout, don't do anything

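Aside: this change separates two concerns that WithManagedFilter previously coupled: NodeClaimForNode now only resolves the node's NodeClaim, and ownership is checked explicitly with nodeclaimutils.IsManaged. The predicate itself is not shown in this diff; a plausible sketch, assuming operatorpkg's object.GVK helper and that GetSupportedNodeClasses returns status.Object values:

// Hypothetical sketch; the real predicate lives in sigs.k8s.io/karpenter/pkg/utils/nodeclaim
// and may differ in detail.
package nodeclaim

import (
	"github.com/awslabs/operatorpkg/object"
	"github.com/awslabs/operatorpkg/status"
	"github.com/samber/lo"

	v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
	"sigs.k8s.io/karpenter/pkg/cloudprovider"
)

// IsManaged reports whether a NodeClaim's NodeClassRef names one of the NodeClass
// kinds this CloudProvider instance supports.
func IsManaged(nodeClaim *v1.NodeClaim, cp cloudprovider.CloudProvider) bool {
	ref := nodeClaim.Spec.NodeClassRef
	return ref != nil && lo.ContainsBy(cp.GetSupportedNodeClasses(), func(nc status.Object) bool {
		gvk := object.GVK(nc) // assumed: operatorpkg resolves the GVK of the typed NodeClass
		return gvk.Group == ref.Group && gvk.Kind == ref.Kind
	})
}
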
4 changes: 2 additions & 2 deletions pkg/controllers/nodepool/counter/suite_test.go

@@ -120,8 +120,8 @@ var _ = Describe("Counter", func() {
 	It("should ignore NodePools which aren't managed by this instance of Karpenter", func() {
 		nodePool = test.NodePool(v1.NodePool{Spec: v1.NodePoolSpec{Template: v1.NodeClaimTemplate{Spec: v1.NodeClaimTemplateSpec{
 			NodeClassRef: &v1.NodeClassReference{
-				Group: "karpenter.k8s.aws",
-				Kind:  "EC2NodeClass",
+				Group: "karpenter.test.sh",
+				Kind:  "UnmanagedNodeClass",
 				Name:  "default",
 			},
 		}}}})

2 changes: 1 addition & 1 deletion pkg/controllers/nodepool/hash/controller.go

@@ -90,7 +90,7 @@ func (c *Controller) Register(_ context.Context, m manager.Manager) error {
 // NodePool. Since, we cannot rely on the `nodepool-hash` on the NodeClaims, due to the breaking change, we will need to re-calculate the hash and update the annotation.
 // For more information on the Drift Hash Versioning: https://github.com/kubernetes-sigs/karpenter/blob/main/designs/drift-hash-versioning.md
 func (c *Controller) updateNodeClaimHash(ctx context.Context, np *v1.NodePool) error {
-	nodeClaims, err := nodeclaimutils.List(ctx, c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider), nodeclaimutils.WithNodePoolFilter(np.Name))
+	nodeClaims, err := nodeclaimutils.ListManaged(ctx, c.kubeClient, c.cloudProvider, nodeclaimutils.ForNodePool(np.Name))
 	if err != nil {
 		return err
 	}

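Aside: WithNodePoolFilter likewise becomes ForNodePool, passed to ListManaged as a trailing option. If ListManaged forwards client.ListOption values (an assumption), the option can be as small as a label selector on the well-known karpenter.sh/nodepool label:

// Hypothetical sketch; upstream may implement the option differently.
package nodeclaim

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

// ForNodePool narrows a list to NodeClaims created for the named NodePool by
// selecting on the karpenter.sh/nodepool label.
func ForNodePool(name string) client.ListOption {
	return client.MatchingLabels{v1.NodePoolLabelKey: name}
}
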
4 changes: 2 additions & 2 deletions pkg/controllers/nodepool/hash/suite_test.go

@@ -96,8 +96,8 @@ var _ = Describe("Static Drift Hash", func() {
 	})
 	It("should ignore NodePools which aren't managed by this instance of Karpenter", func() {
 		nodePool.Spec.Template.Spec.NodeClassRef = &v1.NodeClassReference{
-			Group: "karpenter.k8s.aws",
-			Kind:  "EC2NodeClass",
+			Group: "karpenter.test.sh",
+			Kind:  "UnmanagedNodeClass",
 			Name:  "default",
 		}
 		ExpectApplied(ctx, env.Client, nodePool)

9 changes: 3 additions & 6 deletions pkg/controllers/nodepool/readiness/controller.go

@@ -60,17 +60,13 @@ func (c *Controller) Reconcile(ctx context.Context, nodePool *v1.NodePool) (reco
 	})
 	if !ok {
 		// Ignore NodePools which aren't using a supported NodeClass.
+		// Note: should be unreachable due to predicate in controller reconciliation
 		return reconcile.Result{}, nil
 	}

 	err := c.kubeClient.Get(ctx, client.ObjectKey{Name: nodePool.Spec.Template.Spec.NodeClassRef.Name}, nodeClass)
-	if err != nil && !errors.IsNotFound(err) {
-		if errors.IsNotFound(err) {
-			return reconcile.Result{}, nil
-		}
+	if client.IgnoreNotFound(err) != nil {
 		return reconcile.Result{}, err
 	}
-
 	switch {
 	case errors.IsNotFound(err):
@@ -79,6 +75,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodePool *v1.NodePool) (reco
 		nodePool.StatusConditions().SetFalse(v1.ConditionTypeNodeClassReady, "NodeClassNotFound", "NodeClass not found on cluster")
 	default:
 		c.setReadyCondition(nodePool, nodeClass)
 	}
+
 	if !equality.Semantic.DeepEqual(stored, nodePool) {
 		// We use client.MergeFromWithOptimisticLock because patching a list with a JSON merge patch
 		// can cause races due to the fact that it fully replaces the list on a change

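Aside: the reworked flow reads the NodeClass once, aborts only on unexpected errors, and folds NotFound into the status switch instead of returning early. Reduced to its skeleton (names as in the diff above; imports and the surrounding reconciler elided):

// Skeleton of the get-then-classify pattern above (not a complete reconciler).
err := c.kubeClient.Get(ctx, client.ObjectKey{Name: nodePool.Spec.Template.Spec.NodeClassRef.Name}, nodeClass)
if client.IgnoreNotFound(err) != nil {
	return reconcile.Result{}, err // only unexpected API errors abort the reconcile
}
switch {
case errors.IsNotFound(err):
	// A missing NodeClass is a readiness signal, not a reconcile error.
	nodePool.StatusConditions().SetFalse(v1.ConditionTypeNodeClassReady, "NodeClassNotFound", "NodeClass not found on cluster")
default:
	c.setReadyCondition(nodePool, nodeClass)
}
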
(The remaining changed files in this commit were not loaded in this view.)
