perf: Cache taints for existing nodes (#1827)
jonathan-innis authored Nov 18, 2024
1 parent 8ce869c commit c221747
Showing 2 changed files with 7 additions and 4 deletions.
pkg/controllers/provisioning/scheduling/existingnode.go (6 changes: 4 additions & 2 deletions)
@@ -31,14 +31,15 @@ import (
type ExistingNode struct {
    *state.StateNode
    cachedAvailable v1.ResourceList // Cache so we don't have to re-subtract resources on the StateNode every time
+   cachedTaints    []v1.Taint      // Cache so we don't have to re-construct the taints each time we attempt to schedule a pod

    Pods         []*v1.Pod
    topology     *Topology
    requests     v1.ResourceList
    requirements scheduling.Requirements
}

-func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.ResourceList) *ExistingNode {
+func NewExistingNode(n *state.StateNode, topology *Topology, taints []v1.Taint, daemonResources v1.ResourceList) *ExistingNode {
    // The state node passed in here must be a deep copy from cluster state as we modify it
    // the remaining daemonResources to schedule are the total daemonResources minus what has already scheduled
    remainingDaemonResources := resources.Subtract(daemonResources, n.DaemonSetRequests())
@@ -54,6 +55,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.
    node := &ExistingNode{
        StateNode:       n,
        cachedAvailable: n.Available(),
+       cachedTaints:    taints,
        topology:        topology,
        requests:        remainingDaemonResources,
        requirements:    scheduling.NewLabelRequirements(n.Labels()),
@@ -65,7 +67,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.

func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v1.Pod, podRequests v1.ResourceList) error {
    // Check Taints
-   if err := scheduling.Taints(n.Taints()).Tolerates(pod); err != nil {
+   if err := scheduling.Taints(n.cachedTaints).Tolerates(pod); err != nil {
        return err
    }
    // determine the volumes that will be mounted if the pod schedules
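The net effect in existingnode.go is a compute-once cache: the node's taints are materialized a single time when the ExistingNode is constructed and reused on every subsequent Add call, instead of being rebuilt from the StateNode for each pod tested against the node. Below is a minimal, self-contained Go sketch of that pattern; the Taint, existingNode, and toleration-check shapes are simplified stand-ins for illustration only, not the real state.StateNode or scheduling.Taints APIs.

// cache_sketch.go: a minimal, hypothetical illustration of the caching pattern
// in this commit. The types below are simplified stand-ins, not the real
// Karpenter state.StateNode or scheduling.Taints implementations.
package main

import "fmt"

type Taint struct {
    Key    string
    Effect string
}

// buildTaints stands in for state.StateNode.Taints(), which is assumed to
// merge and allocate a fresh slice on every call.
func buildTaints() []Taint {
    return []Taint{{Key: "node.kubernetes.io/unreachable", Effect: "NoSchedule"}}
}

// existingNode mirrors the new cachedTaints field: the slice is computed once
// at construction time and shared by every scheduling attempt.
type existingNode struct {
    cachedTaints []Taint
}

func newExistingNode(taints []Taint) *existingNode {
    return &existingNode{cachedTaints: taints}
}

// add is the hot path: it runs once per pod/node pair during a scheduling
// loop and only reads the cached slice.
func (n *existingNode) add(tolerated map[string]bool) error {
    for _, t := range n.cachedTaints {
        if !tolerated[t.Key] {
            return fmt.Errorf("pod does not tolerate taint %q", t.Key)
        }
    }
    return nil
}

func main() {
    node := newExistingNode(buildTaints()) // taints built exactly once
    fmt.Println(node.add(map[string]bool{"node.kubernetes.io/unreachable": true})) // <nil>
    fmt.Println(node.add(map[string]bool{}))                                       // toleration error
}

In the sketch, buildTaints executes exactly once while add is called repeatedly against the cached slice, which is the same trade the commit makes for the scheduling hot path.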
pkg/controllers/provisioning/scheduling/scheduler.go (5 changes: 3 additions & 2 deletions)
@@ -318,17 +318,18 @@ func (s *Scheduler) calculateExistingNodeClaims(stateNodes []*state.StateNode, d
    // create our existing nodes
    for _, node := range stateNodes {
        // Calculate any daemonsets that should schedule to the inflight node
+       taints := node.Taints()
        var daemons []*corev1.Pod
        for _, p := range daemonSetPods {
-           if err := scheduling.Taints(node.Taints()).Tolerates(p); err != nil {
+           if err := scheduling.Taints(taints).Tolerates(p); err != nil {
                continue
            }
            if err := scheduling.NewLabelRequirements(node.Labels()).Compatible(scheduling.NewPodRequirements(p)); err != nil {
                continue
            }
            daemons = append(daemons, p)
        }
-       s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, resources.RequestsForPods(daemons...)))
+       s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, taints, resources.RequestsForPods(daemons...)))

        // We don't use the status field and instead recompute the remaining resources to ensure we have a consistent view
        // of the cluster during scheduling. Depending on how node creation falls out, this will also work for cases where
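The scheduler.go side is the matching loop-invariant hoist: node.Taints() was previously evaluated once per daemonset pod inside the inner loop, and is now computed once per node and reused both for the toleration checks and as the new taints argument to NewExistingNode. A hypothetical before/after sketch of that hoist, using simplified pod and node stand-ins rather than the real corev1 and state types:

// hoist_sketch.go: hypothetical before/after of the loop-invariant hoist in
// calculateExistingNodeClaims; pod, node, and tolerates are simplified
// stand-ins, not the real Karpenter or corev1 types.
package main

import "fmt"

type pod struct{ name string }

type node struct{ taintCalls int }

// Taints stands in for state.StateNode.Taints(); assume it allocates and
// merges taints on every call, so calls are worth counting.
func (n *node) Taints() []string {
    n.taintCalls++
    return []string{"example.com/special-workload"}
}

// tolerates is a placeholder for scheduling.Taints(taints).Tolerates(p).
func tolerates(taints []string, p pod) bool { return true }

// daemonsBefore mirrors the old code: one Taints() call per daemonset pod.
func daemonsBefore(n *node, daemonSetPods []pod) (daemons []pod) {
    for _, p := range daemonSetPods {
        if !tolerates(n.Taints(), p) {
            continue
        }
        daemons = append(daemons, p)
    }
    return daemons
}

// daemonsAfter mirrors the new code: Taints() is hoisted out of the loop, and
// the same slice would also be handed to NewExistingNode for reuse in Add().
func daemonsAfter(n *node, daemonSetPods []pod) (daemons []pod) {
    taints := n.Taints()
    for _, p := range daemonSetPods {
        if !tolerates(taints, p) {
            continue
        }
        daemons = append(daemons, p)
    }
    return daemons
}

func main() {
    pods := []pod{{name: "kube-proxy"}, {name: "cni"}, {name: "node-exporter"}}
    before, after := &node{}, &node{}
    daemonsBefore(before, pods)
    daemonsAfter(after, pods)
    fmt.Println(before.taintCalls, after.taintCalls) // 3 1
}

With three daemonset pods the counter records three Taints() calls before the change and one after; the real saving grows with the number of daemonset pods and existing nodes considered in each scheduling pass.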
