chore: Remove usage of 'state node' in eventing, logging, and errors #1906

Merged
4 changes: 3 additions & 1 deletion pkg/controllers/disruption/consolidation.go
@@ -28,6 +28,8 @@ import (
"k8s.io/utils/clock"
"sigs.k8s.io/controller-runtime/pkg/client"

"sigs.k8s.io/karpenter/pkg/utils/pretty"

v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
"sigs.k8s.io/karpenter/pkg/cloudprovider"
disruptionevents "sigs.k8s.io/karpenter/pkg/controllers/disruption/events"
@@ -144,7 +146,7 @@ func (c *consolidation) computeConsolidation(ctx context.Context, candidates ...
if !results.AllNonPendingPodsScheduled() {
// This method is used by multi-node consolidation as well, so we'll only report in the single node case
if len(candidates) == 1 {
c.recorder.Publish(disruptionevents.Unconsolidatable(candidates[0].Node, candidates[0].NodeClaim, results.NonPendingPodSchedulingErrors())...)
c.recorder.Publish(disruptionevents.Unconsolidatable(candidates[0].Node, candidates[0].NodeClaim, pretty.Sentence(results.NonPendingPodSchedulingErrors()))...)
}
return Command{}, pscheduling.Results{}, nil
}
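The call now threads the scheduling-error text through pretty.Sentence before publishing the event. The helper's implementation is not part of this diff; judging by the test expectations below ("Not all pods would schedule", "Pdb %q prevents pod evictions" rather than "PDB"), it presumably just upper-cases the first rune of the message. A minimal sketch under that assumption:

```go
package pretty

import (
	"unicode"
	"unicode/utf8"
)

// Sentence upper-cases the first rune of msg so that event messages read as
// complete sentences, e.g. "not all pods would schedule" -> "Not all pods would schedule".
// Sketch only; the real helper lives in sigs.k8s.io/karpenter/pkg/utils/pretty.
func Sentence(msg string) string {
	if msg == "" {
		return msg
	}
	r, size := utf8.DecodeRuneInString(msg)
	return string(unicode.ToUpper(r)) + msg[size:]
}
```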
2 changes: 1 addition & 1 deletion pkg/controllers/disruption/consolidation_test.go
@@ -2748,7 +2748,7 @@ var _ = Describe("Consolidation", func() {
// Expect Unconsolidatable events to be fired
evts := recorder.Events()
_, ok := lo.Find(evts, func(e events.Event) bool {
return strings.Contains(e.Message, "not all pods would schedule")
return strings.Contains(e.Message, "Not all pods would schedule")
})
Expect(ok).To(BeTrue())
_, ok = lo.Find(evts, func(e events.Event) bool {
4 changes: 3 additions & 1 deletion pkg/controllers/disruption/drift.go
@@ -23,6 +23,8 @@ import (

"sigs.k8s.io/controller-runtime/pkg/client"

"sigs.k8s.io/karpenter/pkg/utils/pretty"

v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
disruptionevents "sigs.k8s.io/karpenter/pkg/controllers/disruption/events"
"sigs.k8s.io/karpenter/pkg/controllers/provisioning"
@@ -100,7 +102,7 @@ func (d *Drift) ComputeCommand(ctx context.Context, disruptionBudgetMapping map[
}
// Emit an event that we couldn't reschedule the pods on the node.
if !results.AllNonPendingPodsScheduled() {
d.recorder.Publish(disruptionevents.Blocked(candidate.Node, candidate.NodeClaim, results.NonPendingPodSchedulingErrors())...)
d.recorder.Publish(disruptionevents.Blocked(candidate.Node, candidate.NodeClaim, pretty.Sentence(results.NonPendingPodSchedulingErrors()))...)
continue
}

12 changes: 6 additions & 6 deletions pkg/controllers/disruption/events/events.go
@@ -69,21 +69,21 @@ func Terminating(node *corev1.Node, nodeClaim *v1.NodeClaim, reason string) []ev

// Unconsolidatable is an event that informs the user that a NodeClaim/Node combination cannot be consolidated
// due to the state of the NodeClaim/Node or due to some state of the pods that are scheduled to the NodeClaim/Node
func Unconsolidatable(node *corev1.Node, nodeClaim *v1.NodeClaim, reason string) []events.Event {
func Unconsolidatable(node *corev1.Node, nodeClaim *v1.NodeClaim, msg string) []events.Event {
return []events.Event{
{
InvolvedObject: node,
Type: corev1.EventTypeNormal,
Reason: "Unconsolidatable",
Message: reason,
Message: msg,
DedupeValues: []string{string(node.UID)},
DedupeTimeout: time.Minute * 15,
},
{
InvolvedObject: nodeClaim,
Type: corev1.EventTypeNormal,
Reason: "Unconsolidatable",
Message: reason,
Message: msg,
DedupeValues: []string{string(nodeClaim.UID)},
DedupeTimeout: time.Minute * 15,
},
@@ -92,13 +92,13 @@ func Unconsolidatable(node *corev1.Node, nodeClaim *v1.NodeClaim, reason string)

// Blocked is an event that informs the user that a NodeClaim/Node combination is blocked on deprovisioning
// due to the state of the NodeClaim/Node or due to some state of the pods that are scheduled to the NodeClaim/Node
func Blocked(node *corev1.Node, nodeClaim *v1.NodeClaim, reason string) (evs []events.Event) {
func Blocked(node *corev1.Node, nodeClaim *v1.NodeClaim, msg string) (evs []events.Event) {
if node != nil {
evs = append(evs, events.Event{
InvolvedObject: node,
Type: corev1.EventTypeNormal,
Reason: "DisruptionBlocked",
Message: fmt.Sprintf("Cannot disrupt Node: %s", reason),
Message: msg,
DedupeValues: []string{string(node.UID)},
})
}
@@ -107,7 +107,7 @@ func Blocked(node *corev1.Node, nodeClaim *v1.NodeClaim, reason string) (evs []e
InvolvedObject: nodeClaim,
Type: corev1.EventTypeNormal,
Reason: "DisruptionBlocked",
Message: fmt.Sprintf("Cannot disrupt NodeClaim: %s", reason),
Message: msg,
DedupeValues: []string{string(nodeClaim.UID)},
})
}
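With the fmt.Sprintf prefixes removed, Unconsolidatable and Blocked no longer compose the message themselves; the caller is responsible for passing a complete sentence. A hedged example of a call site under the new signature, mirroring the drift.go hunk above (candidate, d.recorder, and results are the surrounding code's variables, not new names):

```go
// The caller now owns the full message text: it formats the scheduling errors
// as a sentence instead of relying on the event helper to prepend
// "Cannot disrupt Node: " to a lowercase reason.
d.recorder.Publish(disruptionevents.Blocked(
	candidate.Node,
	candidate.NodeClaim,
	pretty.Sentence(results.NonPendingPodSchedulingErrors()),
)...)
```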
44 changes: 22 additions & 22 deletions pkg/controllers/disruption/suite_test.go
@@ -876,7 +876,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
})
It("should not consider candidates that have do-not-disrupt mirror pods scheduled", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -914,7 +914,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
})
It("should not consider candidates that have do-not-disrupt daemonset pods scheduled", func() {
daemonSet := test.DaemonSet()
@@ -953,7 +953,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
})
It("should consider candidates that have do-not-disrupt pods scheduled with a terminationGracePeriod set for eventual disruption", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1043,7 +1043,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
})
It("should not consider candidates that have PDB-blocked pods scheduled with a terminationGracePeriod set for graceful disruption", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1079,7 +1079,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err = disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
})
It("should not consider candidates that have do-not-disrupt pods scheduled without a terminationGracePeriod set for eventual disruption", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1107,7 +1107,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.EventualDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
})
It("should not consider candidates that have PDB-blocked pods scheduled without a terminationGracePeriod set for eventual disruption", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1142,7 +1142,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err = disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.EventualDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
})
It("should consider candidates that have do-not-disrupt terminating pods", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1233,7 +1233,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(`disruption is blocked through the "karpenter.sh/do-not-disrupt" annotation`))
Expect(recorder.DetectedEvent(`Cannot disrupt Node: disruption is blocked through the "karpenter.sh/do-not-disrupt" annotation`)).To(BeTrue())
Expect(recorder.DetectedEvent(`Disruption is blocked through the "karpenter.sh/do-not-disrupt" annotation`)).To(BeTrue())
})
It("should not consider candidates that have fully blocking PDBs", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1269,7 +1269,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err = disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
})
It("should not consider candidates that have fully blocking PDBs on daemonset pods", func() {
daemonSet := test.DaemonSet()
@@ -1316,7 +1316,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err = disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
})
It("should consider candidates that have fully blocking PDBs on mirror pods", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1393,7 +1393,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err = disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pod %q has "karpenter.sh/do-not-disrupt" annotation`, client.ObjectKeyFromObject(pod)))).To(BeTrue())
})
It("should not consider candidates that have fully blocking PDBs without a terminationGracePeriod set for graceful disruption", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1427,7 +1427,7 @@ var _ = Describe("Candidate Filtering", func() {
_, err = disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget))))
Expect(recorder.DetectedEvent(fmt.Sprintf(`Cannot disrupt Node: pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
Expect(recorder.DetectedEvent(fmt.Sprintf(`Pdb %q prevents pod evictions`, client.ObjectKeyFromObject(budget)))).To(BeTrue())
})
It("should consider candidates that have fully blocking PDBs on terminal pods", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1528,7 +1528,7 @@ var _ = Describe("Candidate Filtering", func() {
Expect(cluster.Nodes()).To(HaveLen(1))
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("node is not managed by karpenter"))
Expect(err.Error()).To(Equal("node isn't managed by karpenter"))
})
It("should not consider candidate that has just a NodeClaim representation", func() {
nodeClaim, _ := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1567,8 +1567,8 @@ var _ = Describe("Candidate Filtering", func() {
Expect(cluster.Nodes()).To(HaveLen(1))
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("state node is nominated for a pending pod"))
Expect(recorder.DetectedEvent("Cannot disrupt Node: state node is nominated for a pending pod")).To(BeTrue())
Expect(err.Error()).To(Equal("node is nominated for a pending pod"))
Expect(recorder.DetectedEvent("Node is nominated for a pending pod")).To(BeTrue())
})
It("should not consider candidates that are deleting", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1590,7 +1590,7 @@ var _ = Describe("Candidate Filtering", func() {
Expect(cluster.Nodes()).To(HaveLen(1))
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("state node is marked for deletion"))
Expect(err.Error()).To(Equal("node is deleting or marked for deletion"))
})
It("should not consider candidates that are MarkedForDeletion", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1611,7 +1611,7 @@ var _ = Describe("Candidate Filtering", func() {
Expect(cluster.Nodes()).To(HaveLen(1))
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("state node is marked for deletion"))
Expect(err.Error()).To(Equal("node is deleting or marked for deletion"))
})
It("should not consider candidates that aren't yet initialized", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1631,7 +1631,7 @@ var _ = Describe("Candidate Filtering", func() {
Expect(cluster.Nodes()).To(HaveLen(1))
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("state node isn't initialized"))
Expect(err.Error()).To(Equal("node isn't initialized"))
})
It("should not consider candidates that are not owned by a NodePool (no karpenter.sh/nodepool label)", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1649,8 +1649,8 @@ var _ = Describe("Candidate Filtering", func() {
Expect(cluster.Nodes()).To(HaveLen(1))
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(`state node doesn't have required label "karpenter.sh/nodepool"`))
Expect(recorder.DetectedEvent(`Cannot disrupt Node: state node doesn't have required label "karpenter.sh/nodepool"`)).To(BeTrue())
Expect(err.Error()).To(Equal(`node doesn't have required label "karpenter.sh/nodepool"`))
Expect(recorder.DetectedEvent(`Node doesn't have required label "karpenter.sh/nodepool"`)).To(BeTrue())
})
It("should not consider candidates that are have a non-existent NodePool", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
@@ -1674,8 +1674,8 @@ var _ = Describe("Candidate Filtering", func() {
Expect(cluster.Nodes()).To(HaveLen(1))
_, err := disruption.NewCandidate(ctx, env.Client, recorder, fakeClock, cluster.Nodes()[0], pdbLimits, nodePoolMap, nodePoolInstanceTypeMap, queue, disruption.GracefulDisruptionClass)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf("nodepool %q can't be resolved for state node", nodePool.Name)))
Expect(recorder.DetectedEvent(fmt.Sprintf("Cannot disrupt Node: NodePool %q not found", nodePool.Name))).To(BeTrue())
Expect(err.Error()).To(Equal(fmt.Sprintf("nodepool %q not found", nodePool.Name)))
Expect(recorder.DetectedEvent(fmt.Sprintf("NodePool %q not found", nodePool.Name))).To(BeTrue())
})
It("should consider candidates that do not have the karpenter.sh/capacity-type label", func() {
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
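The updated assertions above pin down the exact wording of the renamed candidate-filtering errors (no more "state node"). The filtering code itself is outside this diff; a purely illustrative helper that maps hypothetical condition flags onto the message strings the tests assert:

```go
package disruption

import "errors"

// candidateError is an illustrative stand-in: the boolean inputs represent the
// real state checks performed by candidate filtering (not shown in this diff);
// only the returned messages match the strings asserted in suite_test.go.
func candidateError(managed, initialized, markedForDeletion, nominated, hasNodePoolLabel bool) error {
	switch {
	case !managed:
		return errors.New("node isn't managed by karpenter")
	case markedForDeletion:
		return errors.New("node is deleting or marked for deletion")
	case nominated:
		return errors.New("node is nominated for a pending pod")
	case !initialized:
		return errors.New("node isn't initialized")
	case !hasNodePoolLabel:
		return errors.New(`node doesn't have required label "karpenter.sh/nodepool"`)
	}
	return nil
}
```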