Do not remove healthy nodes from partially failing zero-or-max-scaling node groups #8291

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 1 commit into base: master
2 changes: 2 additions & 0 deletions cluster-autoscaler/config/autoscaling_options.go
@@ -52,6 +52,8 @@ type NodeGroupAutoscalingOptions struct {
MaxNodeProvisionTime time.Duration
// ZeroOrMaxNodeScaling means that a node group should be scaled up to maximum size or down to zero nodes all at once instead of one-by-one.
ZeroOrMaxNodeScaling bool
// KeepPartiallyFailedZeroOrMaxScalingNodeGroups indicates that partially failing ZeroOrMaxNodeScaling node groups should not be removed.
KeepPartiallyFailedZeroOrMaxScalingNodeGroups bool
Contributor:
Maybe DisableAtomicZeroOrMaxNodeScaling? We are effectively removing the atomic part of the logic.

Author:
Not fully. We still scale down atomically, regardless of whether there were failed nodes during scale-up. So maybe AllowNonAtomicScaleUpToMax?

// IgnoreDaemonSetsUtilization sets if daemonsets utilization should be considered during node scale-down
IgnoreDaemonSetsUtilization bool
}
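For illustration, a minimal sketch of how the new field might sit next to the existing per-node-group options. The struct below is a trimmed stand-in for config.NodeGroupAutoscalingOptions written only for this example (field names taken from the diff above, the 15-minute value is hypothetical), not the real package:

    package main

    import (
        "fmt"
        "time"
    )

    // Trimmed stand-in for config.NodeGroupAutoscalingOptions, for illustration only.
    type NodeGroupAutoscalingOptions struct {
        MaxNodeProvisionTime                          time.Duration
        ZeroOrMaxNodeScaling                          bool
        KeepPartiallyFailedZeroOrMaxScalingNodeGroups bool
        IgnoreDaemonSetsUtilization                   bool
    }

    func main() {
        // A node group that scales zero-or-max, but whose healthy nodes should be
        // kept if only part of a scale-up fails (the behaviour this PR adds).
        opts := NodeGroupAutoscalingOptions{
            MaxNodeProvisionTime: 15 * time.Minute,
            ZeroOrMaxNodeScaling: true,
            KeepPartiallyFailedZeroOrMaxScalingNodeGroups: true,
        }
        fmt.Printf("%+v\n", opts)
    }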
58 changes: 35 additions & 23 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -801,29 +801,31 @@ func (a *StaticAutoscaler) removeOldUnregisteredNodes(allUnregisteredNodes []clu
            continue
        }

        if len(nodesToDelete) > 0 {

Contributor:

You can return on len(nodesToDelete) == 0 to remove nesting.

            if a.ForceDeleteLongUnregisteredNodes {
                err = nodeGroup.ForceDeleteNodes(nodesToDelete)
                if err == cloudprovider.ErrNotImplemented {
                    err = nodeGroup.DeleteNodes(nodesToDelete)
                }
            } else {
                err = nodeGroup.DeleteNodes(nodesToDelete)
            }
            csr.InvalidateNodeInstancesCacheEntry(nodeGroup)
            if err != nil {
                klog.Warningf("Failed to remove %v unregistered nodes from node group %s: %v", len(nodesToDelete), nodeGroupId, err)
                for _, node := range nodesToDelete {
                    logRecorder.Eventf(apiv1.EventTypeWarning, "DeleteUnregisteredFailed",
                        "Failed to remove node %s: %v", node.Name, err)
                }
                return removedAny, err
            }
            for _, node := range nodesToDelete {
                logRecorder.Eventf(apiv1.EventTypeNormal, "DeleteUnregistered",
                    "Removed unregistered node %v", node.Name)
            }
            metrics.RegisterOldUnregisteredNodesRemoved(len(nodesToDelete))
            removedAny = true
        }
    }
    return removedAny, nil
}
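The flattening the reviewer suggests above could look roughly like the sketch below. It is a self-contained toy with hypothetical stub types (nodeGroup and removeOldUnregisteredNodes take simplified signatures invented for this example), and it uses continue rather than return on the assumption that the length check sits inside the per-node-group loop:

    package main

    import "fmt"

    // nodeGroup is a hypothetical stub standing in for cloudprovider.NodeGroup.
    type nodeGroup struct{ id string }

    func (g nodeGroup) DeleteNodes(nodes []string) error {
        fmt.Printf("deleting %d nodes from %s\n", len(nodes), g.id)
        return nil
    }

    func removeOldUnregisteredNodes(groups map[nodeGroup][]string) bool {
        removedAny := false
        for group, nodesToDelete := range groups {
            // Skip early instead of nesting the rest of the body in
            // "if len(nodesToDelete) > 0 { ... }".
            if len(nodesToDelete) == 0 {
                continue
            }
            if err := group.DeleteNodes(nodesToDelete); err != nil {
                continue
            }
            removedAny = true
        }
        return removedAny
    }

    func main() {
        groups := map[nodeGroup][]string{
            {id: "ng-a"}: {"node-1", "node-2"},
            {id: "ng-b"}: {}, // nothing to delete here, skipped early
        }
        fmt.Println("removed any:", removeOldUnregisteredNodes(groups))
    }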
@@ -880,12 +882,14 @@ func (a *StaticAutoscaler) deleteCreatedNodesWithErrors() {
        if nodeGroup == nil {
            err = fmt.Errorf("node group %s not found", nodeGroupId)
        } else if nodesToDelete, err = overrideNodesToDeleteForZeroOrMax(a.NodeGroupDefaults, nodeGroup, nodesToDelete); err == nil {
            if len(nodesToDelete) > 0 {
                err = nodeGroup.DeleteNodes(nodesToDelete)
            }
        }

        if err != nil {
            klog.Warningf("Error while trying to delete nodes from %v: %v", nodeGroupId, err)
        } else if len(nodesToDelete) > 0 {
            deletedAny = true
            a.clusterStateRegistry.InvalidateNodeInstancesCacheEntry(nodeGroup)
        }
@@ -898,21 +902,29 @@ func (a *StaticAutoscaler) deleteCreatedNodesWithErrors() {
}

// overrideNodesToDeleteForZeroOrMax returns a list of nodes to delete, taking into account that
// node deletion for a "ZeroOrMaxNodeScaling" node group should either keep or remove all the nodes.
// For a non-"ZeroOrMaxNodeScaling" node group it returns the unchanged list of nodes to delete.
func overrideNodesToDeleteForZeroOrMax(defaults config.NodeGroupAutoscalingOptions, nodeGroup cloudprovider.NodeGroup, nodesToDelete []*apiv1.Node) ([]*apiv1.Node, error) {
    opts, err := nodeGroup.GetOptions(defaults)
    if err != nil && err != cloudprovider.ErrNotImplemented {
        return []*apiv1.Node{}, fmt.Errorf("Failed to get node group options for %s: %s", nodeGroup.Id(), err)
    }
    // node deletion for a "ZeroOrMaxNodeScaling" node group is atomic and should delete all nodes or none.
    if opts != nil && opts.ZeroOrMaxNodeScaling {
        instances, err := nodeGroup.Nodes()
        if err != nil {
            return []*apiv1.Node{}, fmt.Errorf("Failed to fill in nodes to delete from group %s based on ZeroOrMaxNodeScaling option: %s", nodeGroup.Id(), err)
        }

        // Remove all nodes when either:
        // 1. all nodes are failing, or
        // 2. KeepPartiallyFailedZeroOrMaxScalingNodeGroups is false, meaning partially failed node groups should still be removed atomically.
        if len(instances) == len(nodesToDelete) || !opts.KeepPartiallyFailedZeroOrMaxScalingNodeGroups {
            // Remove all nodes.
            return instancesToFakeNodes(instances), nil
        }
        return []*apiv1.Node{}, nil
    }
    // No override needed.
    return nodesToDelete, nil
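To make the keep-or-delete decision easier to follow, here is a self-contained sketch of just the condition above, using hypothetical node counts in place of real cloudprovider instances; it only illustrates when the whole group is wiped versus when the healthy nodes are kept:

    package main

    import "fmt"

    // Hypothetical, simplified inputs: total instances in the ZeroOrMaxNodeScaling
    // group, how many of them failed, and the new option's value.
    func nodesToRemove(totalInstances, failedNodes int, keepPartiallyFailed bool) int {
        // Mirrors the condition in the diff: wipe the whole group when every node
        // failed, or when partially failed groups should still be removed atomically.
        if failedNodes == totalInstances || !keepPartiallyFailed {
            return totalInstances // remove all nodes
        }
        return 0 // keep the healthy nodes; nothing is deleted for this group
    }

    func main() {
        fmt.Println(nodesToRemove(5, 2, false)) // 5: partial failure, old atomic behaviour
        fmt.Println(nodesToRemove(5, 2, true))  // 0: partial failure, healthy nodes kept
        fmt.Println(nodesToRemove(5, 5, true))  // 5: every node failed, group is wiped
    }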