Limit node repair based on nodepool
engedaam committed Nov 22, 2024
1 parent c221747 commit 9059ee9
Showing 2 changed files with 99 additions and 0 deletions.
44 changes: 44 additions & 0 deletions pkg/controllers/node/health/controller.go
@@ -18,10 +18,12 @@ package health

import (
"context"
"fmt"
"time"

"github.com/samber/lo"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
controllerruntime "sigs.k8s.io/controller-runtime"
@@ -70,6 +72,20 @@ func (c *Controller) Reconcile(ctx context.Context, node *corev1.Node) (reconcil
return reconcile.Result{}, nodeutils.IgnoreNodeClaimNotFoundError(err)
}

nodePoolName, found := nodeClaim.Labels[v1.NodePoolLabelKey]
if !found {
return reconcile.Result{}, fmt.Errorf("nodepool cannot be found from node")
}

nodePoolHealthy, err := c.isNodePoolHealthy(ctx, nodePoolName)
if err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if !nodePoolHealthy {
log.FromContext(ctx).WithValues("nodepool", nodePoolName).V(1).Info("nodepool is unhealthy")
return reconcile.Result{RequeueAfter: time.Minute}, nil
}

unhealthyNodeCondition, policyTerminationDuration := c.findUnhealthyConditions(node)
if unhealthyNodeCondition == nil {
return reconcile.Result{}, nil
@@ -130,3 +146,31 @@ func (c *Controller) annotateTerminationGracePeriod(ctx context.Context, nodeCla

return nil
}

// isNodePoolHealthy checks whether the number of nodes matching one of the cloud provider's repair policies exceeds the NodePool's health threshold.
// Up to 20% of a NodePool's nodes (rounded up to the nearest whole number) may be unhealthy before the NodePool itself is considered unhealthy.
// For example, a NodePool with three nodes tolerates one unhealthy node, even though that is 33% of its nodes.
// This mirrors how minAvailable and maxUnavailable percentages are rounded for PodDisruptionBudgets: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#rounding-logic-when-specifying-percentages.
func (c *Controller) isNodePoolHealthy(ctx context.Context, nodePoolName string) (bool, error) {
nodeList := &corev1.NodeList{}
if err := c.kubeClient.List(ctx, nodeList, client.MatchingLabels(map[string]string{v1.NodePoolLabelKey: nodePoolName})); err != nil {
return false, err
}

for _, policy := range c.cloudProvider.RepairPolicies() {
unhealthyNodeCount := lo.CountBy(nodeList.Items, func(node corev1.Node) bool {
nodeCondition := nodeutils.GetCondition(lo.ToPtr(node), policy.ConditionType)
return nodeCondition.Status == policy.ConditionStatus
})

threshold, err := intstr.GetScaledValueFromIntOrPercent(lo.ToPtr(intstr.FromString("20%")), len(nodeList.Items), true)
if err != nil {
return false, err
}
if unhealthyNodeCount > threshold {
return false, nil
}
}
return true, nil
}
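
The threshold above is computed with intstr.GetScaledValueFromIntOrPercent, scaling 20% by the NodePool's node count and rounding up, so small NodePools tolerate proportionally more unhealthy nodes. The following is a minimal standalone sketch of that arithmetic (not part of this commit; the package name and printed annotations are illustrative), matching the two test cases added below:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	pct := intstr.FromString("20%")
	for _, total := range []int{3, 10} {
		// roundUp=true mirrors isNodePoolHealthy: threshold = ceil(total * 20%)
		threshold, err := intstr.GetScaledValueFromIntOrPercent(&pct, total, true)
		if err != nil {
			panic(err)
		}
		fmt.Printf("nodes=%d threshold=%d\n", total, threshold)
	}
	// Prints:
	//   nodes=3 threshold=1  -> one unhealthy node is not > 1, the NodePool stays healthy
	//   nodes=10 threshold=2 -> three unhealthy nodes is > 2, repair pauses and reconciliation requeues
}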
55 changes: 55 additions & 0 deletions pkg/controllers/node/health/suite_test.go
@@ -274,6 +274,61 @@ var _ = Describe("Node Health", func() {
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
Expect(nodeClaim.DeletionTimestamp).ToNot(BeNil())
})
It("should ignore unhealthy nodes if more then 20% of the nodes are unhealthy", func() {
ExpectApplied(ctx, env.Client, nodePool)
nodeClaims := []*v1.NodeClaim{}
nodes := []*corev1.Node{}
for i := range 10 {
nodeClaim, node = test.NodeClaimAndNode(v1.NodeClaim{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{v1.TerminationFinalizer}}})
if i < 3 {
node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{
Type: "BadNode",
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.Time{Time: fakeClock.Now()},
})
}
node.Labels[v1.NodePoolLabelKey] = nodePool.Name
nodeClaim.Labels[v1.NodePoolLabelKey] = nodePool.Name
nodeClaims = append(nodeClaims, nodeClaim)
nodes = append(nodes, node)
ExpectApplied(ctx, env.Client, nodePool, nodeClaim, node)
}

fakeClock.Step(60 * time.Minute)

// More than 20% of the NodePool is unhealthy, so no NodeClaims should be deleted and reconciliation should requeue
for i := range 4 {
res := ExpectObjectReconciled(ctx, env.Client, healthController, nodes[i])
nodeClaim = ExpectExists(ctx, env.Client, nodeClaims[i])
Expect(nodeClaim.DeletionTimestamp).To(BeNil())
Expect(res.RequeueAfter).To(BeNumerically("~", time.Minute*1, time.Second))
}
})
It("should consider a NodePool healthy when a single Node is unhealthy and the threshold is exceeded", func() {
nodeClaims := []*v1.NodeClaim{}
nodes := []*corev1.Node{}
for i := range 3 {
nodeClaim, node = test.NodeClaimAndNode(v1.NodeClaim{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{v1.TerminationFinalizer}}})
if i == 0 {
node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{
Type: "BadNode",
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.Time{Time: fakeClock.Now()},
})
}
node.Labels[v1.NodePoolLabelKey] = nodePool.Name
nodeClaim.Labels[v1.NodePoolLabelKey] = nodePool.Name
nodeClaims = append(nodeClaims, nodeClaim)
nodes = append(nodes, node)
ExpectApplied(ctx, env.Client, nodePool, nodeClaim, node)
}

fakeClock.Step(60 * time.Minute)
// The NodePool tolerates one unhealthy node out of three (threshold rounds up to 1), so the unhealthy node should be repaired
ExpectObjectReconciled(ctx, env.Client, healthController, nodes[0])
nodeClaim = ExpectExists(ctx, env.Client, nodeClaims[0])
Expect(nodeClaim.DeletionTimestamp).ToNot(BeNil())
})
})
Context("Metrics", func() {
It("should fire a karpenter_nodeclaims_disrupted_total metric when unhealthy", func() {