Commit

feedback
jmdeal committed Nov 23, 2024
1 parent ad7c521 commit 540eb38
Showing 49 changed files with 156 additions and 239 deletions.
5 changes: 5 additions & 0 deletions pkg/apis/v1/labels.go
@@ -21,6 +21,7 @@ import (
"strings"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"

"sigs.k8s.io/karpenter/pkg/apis"
@@ -141,3 +142,7 @@ func GetLabelDomain(key string) string {
}
return ""
}

func NodeClassLabelKey(gk schema.GroupKind) string {
return fmt.Sprintf("%s/%s", gk.Group, strings.ToLower(gk.Kind))
}
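
For reference, a minimal, hypothetical sketch of how the new NodeClassLabelKey helper behaves (the function body is copied verbatim from this diff; the GroupKind values below are illustrative test names, not part of the commit):

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// NodeClassLabelKey joins the NodeClass group with the lower-cased kind,
// mirroring the helper added to pkg/apis/v1/labels.go above.
func NodeClassLabelKey(gk schema.GroupKind) string {
	return fmt.Sprintf("%s/%s", gk.Group, strings.ToLower(gk.Kind))
}

func main() {
	// Hypothetical NodeClass GroupKind, matching the label key style seen in
	// the test suites below (e.g. "karpenter.test.sh/testnodeclass").
	gk := schema.GroupKind{Group: "karpenter.test.sh", Kind: "TestNodeClass"}
	fmt.Println(NodeClassLabelKey(gk)) // prints: karpenter.test.sh/testnodeclass
}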
4 changes: 2 additions & 2 deletions pkg/apis/v1/nodepool_validation_cel_test.go
@@ -623,12 +623,12 @@ var _ = Describe("CEL/Validation", func() {
Context("NodeClassRef", func() {
It("should fail to mutate group", func() {
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
nodePool.Spec.Template.Spec.NodeClassRef.Group = "karpenter.k8s.aws"
nodePool.Spec.Template.Spec.NodeClassRef.Group = "karpenter.test.mutated.sh"
Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
})
It("should fail to mutate kind", func() {
Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
nodePool.Spec.Template.Spec.NodeClassRef.Group = "EC2NodeClass"
nodePool.Spec.Template.Spec.NodeClassRef.Group = "TestNodeClass2"
Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
})
It("should fail if group is unset", func() {
4 changes: 2 additions & 2 deletions pkg/controllers/disruption/controller.go
@@ -36,7 +36,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"sigs.k8s.io/karpenter/pkg/utils/nodepool"
nodepoolutils "sigs.k8s.io/karpenter/pkg/utils/nodepool"
"sigs.k8s.io/karpenter/pkg/utils/pretty"

v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
@@ -277,7 +277,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) {

// logInvalidBudgets will log if there are any invalid schedules detected
func (c *Controller) logInvalidBudgets(ctx context.Context) {
nps, err := nodepool.List(ctx, c.kubeClient, nodepool.WithManagedFilter(c.cloudProvider))
nps, err := nodepoolutils.ListManaged(ctx, c.kubeClient, c.cloudProvider)
if err != nil {
log.FromContext(ctx).Error(err, "failed listing nodepools")
return
4 changes: 2 additions & 2 deletions pkg/controllers/disruption/helpers.go
@@ -163,7 +163,7 @@ func GetCandidates(ctx context.Context, cluster *state.Cluster, kubeClient clien
// BuildNodePoolMap builds a provName -> nodePool map and a provName -> instanceName -> instance type map
func BuildNodePoolMap(ctx context.Context, kubeClient client.Client, cloudProvider cloudprovider.CloudProvider) (map[string]*v1.NodePool, map[string]map[string]*cloudprovider.InstanceType, error) {
nodePoolMap := map[string]*v1.NodePool{}
nodePools, err := nodepoolutils.List(ctx, kubeClient, nodepoolutils.WithManagedFilter(cloudProvider))
nodePools, err := nodepoolutils.ListManaged(ctx, kubeClient, cloudProvider)
if err != nil {
return nil, nil, fmt.Errorf("listing node pools, %w", err)
}
@@ -227,7 +227,7 @@ func BuildDisruptionBudgetMapping(ctx context.Context, cluster *state.Cluster, c
disrupting[nodePool]++
}
}
nodePools, err := nodepoolutils.List(ctx, kubeClient, nodepoolutils.WithManagedFilter(cloudProvider))
nodePools, err := nodepoolutils.ListManaged(ctx, kubeClient, cloudProvider)
if err != nil {
return disruptionBudgetMapping, fmt.Errorf("listing node pools, %w", err)
}
2 changes: 1 addition & 1 deletion pkg/controllers/metrics/nodepool/controller.go
@@ -141,6 +141,6 @@ func makeLabels(nodePool *v1.NodePool, resourceTypeName string) prometheus.Label
func (c *Controller) Register(_ context.Context, m manager.Manager) error {
return controllerruntime.NewControllerManagedBy(m).
Named("metrics.nodepool").
For(&v1.NodePool{}, builder.WithPredicates(nodepoolutils.IsManagedPredicates(c.cloudProvider))).
For(&v1.NodePool{}, builder.WithPredicates(nodepoolutils.IsManagedPredicateFuncs(c.cloudProvider))).
Complete(c)
}
4 changes: 2 additions & 2 deletions pkg/controllers/metrics/nodepool/suite_test.go
@@ -86,8 +86,8 @@ var _ = Describe("Metrics", func() {
nodePool.Spec.Limits = limits
if !isNodePoolManaged {
nodePool.Spec.Template.Spec.NodeClassRef = &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
}
}
7 changes: 3 additions & 4 deletions pkg/controllers/node/termination/controller.go
@@ -39,7 +39,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
"sigs.k8s.io/karpenter/pkg/utils/pretty"

v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
@@ -93,7 +92,7 @@ func (c *Controller) finalize(ctx context.Context, node *corev1.Node) (reconcile
return reconcile.Result{}, nil
}

nodeClaims, err := nodeutils.GetNodeClaims(ctx, node, c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider))
nodeClaims, err := nodeutils.GetNodeClaims(ctx, c.kubeClient, node)
if err != nil {
return reconcile.Result{}, fmt.Errorf("listing nodeclaims, %w", err)
}
@@ -149,7 +148,7 @@ func (c *Controller) finalize(ctx context.Context, node *corev1.Node) (reconcile
return reconcile.Result{RequeueAfter: 1 * time.Second}, nil
}
}
nodeClaims, err = nodeutils.GetNodeClaims(ctx, node, c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider))
nodeClaims, err = nodeutils.GetNodeClaims(ctx, c.kubeClient, node)
if err != nil {
return reconcile.Result{}, fmt.Errorf("deleting nodeclaims, %w", err)
}
@@ -289,7 +288,7 @@ func (c *Controller) nodeTerminationTime(node *corev1.Node, nodeClaims ...*v1.No
func (c *Controller) Register(_ context.Context, m manager.Manager) error {
return controllerruntime.NewControllerManagedBy(m).
Named("node.termination").
For(&corev1.Node{}, builder.WithPredicates(nodeutils.IsManagedPredicates(c.cloudProvider))).
For(&corev1.Node{}, builder.WithPredicates(nodeutils.IsManagedPredicateFuncs(c.cloudProvider))).
WithOptions(
controller.Options{
RateLimiter: workqueue.NewTypedMaxOfRateLimiter[reconcile.Request](
2 changes: 1 addition & 1 deletion pkg/controllers/node/termination/suite_test.go
@@ -114,7 +114,7 @@ var _ = Describe("Termination", func() {
})
It("should ignore nodes not managed by this Karpenter instance", func() {
delete(node.Labels, "karpenter.test.sh/testnodeclass")
node.Labels = lo.Assign(node.Labels, map[string]string{"karpenter.k8s.aws/ec2nodeclass": "default"})
node.Labels = lo.Assign(node.Labels, map[string]string{"karpenter.test.sh/unmanagednodeclass": "default"})
ExpectApplied(ctx, env.Client, node)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
4 changes: 2 additions & 2 deletions pkg/controllers/node/termination/terminator/terminator.go
@@ -31,7 +31,7 @@ import (

terminatorevents "sigs.k8s.io/karpenter/pkg/controllers/node/termination/terminator/events"
"sigs.k8s.io/karpenter/pkg/events"
nodeutil "sigs.k8s.io/karpenter/pkg/utils/node"
nodeutils "sigs.k8s.io/karpenter/pkg/utils/node"
podutil "sigs.k8s.io/karpenter/pkg/utils/pod"
)

@@ -94,7 +94,7 @@ func (t *Terminator) Taint(ctx context.Context, node *corev1.Node, taint corev1.
// Drain evicts pods from the node and returns true when all pods are evicted
// https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown
func (t *Terminator) Drain(ctx context.Context, node *corev1.Node, nodeGracePeriodExpirationTime *time.Time) error {
pods, err := nodeutil.GetPods(ctx, t.kubeClient, node)
pods, err := nodeutils.GetPods(ctx, t.kubeClient, node)
if err != nil {
return fmt.Errorf("listing pods on node, %w", err)
}
12 changes: 6 additions & 6 deletions pkg/controllers/nodeclaim/consistency/controller.go
@@ -39,7 +39,7 @@ import (
"sigs.k8s.io/karpenter/pkg/cloudprovider"
"sigs.k8s.io/karpenter/pkg/events"
"sigs.k8s.io/karpenter/pkg/operator/injection"
nodeclaimutil "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
)

type Controller struct {
@@ -78,7 +78,7 @@ func NewController(clk clock.Clock, kubeClient client.Client, cloudProvider clou

func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (reconcile.Result, error) {
ctx = injection.WithControllerName(ctx, "nodeclaim.consistency")
if !nodeclaimutil.IsManaged(nodeClaim, c.cloudProvider) {
if !nodeclaimutils.IsManaged(nodeClaim, c.cloudProvider) {
return reconcile.Result{}, nil
}
if nodeClaim.Status.ProviderID == "" {
@@ -99,9 +99,9 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (re

// We assume the invariant that there is a single node for a single nodeClaim. If this invariant is violated,
// then we assume this is bubbled up through the nodeClaim lifecycle controller and don't perform consistency checks
node, err := nodeclaimutil.NodeForNodeClaim(ctx, c.kubeClient, nodeClaim)
node, err := nodeclaimutils.NodeForNodeClaim(ctx, c.kubeClient, nodeClaim)
if err != nil {
return reconcile.Result{}, nodeclaimutil.IgnoreDuplicateNodeError(nodeclaimutil.IgnoreNodeNotFoundError(err))
return reconcile.Result{}, nodeclaimutils.IgnoreDuplicateNodeError(nodeclaimutils.IgnoreNodeNotFoundError(err))
}
if err = c.checkConsistency(ctx, nodeClaim, node); err != nil {
return reconcile.Result{}, err
@@ -147,10 +147,10 @@ func (c *Controller) checkConsistency(ctx context.Context, nodeClaim *v1.NodeCla
func (c *Controller) Register(_ context.Context, m manager.Manager) error {
return controllerruntime.NewControllerManagedBy(m).
Named("nodeclaim.consistency").
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutil.IsManagedPredicates(c.cloudProvider))).
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicateFuncs(c.cloudProvider))).
Watches(
&corev1.Node{},
nodeclaimutil.NodeEventHandler(c.kubeClient, nodeclaimutil.WithManagedFilter(c.cloudProvider)),
nodeclaimutils.NodeEventHandler(c.kubeClient, c.cloudProvider),
).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}).
Complete(reconcile.AsReconciler(m.GetClient(), c))
8 changes: 4 additions & 4 deletions pkg/controllers/nodeclaim/consistency/suite_test.go
@@ -112,8 +112,8 @@ var _ = Describe("NodeClaimController", func() {
nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
Spec: v1.NodeClaimSpec{
NodeClassRef: &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
},
},
@@ -178,8 +178,8 @@ var _ = Describe("NodeClaimController", func() {
nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
Spec: v1.NodeClaimSpec{
NodeClassRef: &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
},
},
7 changes: 2 additions & 5 deletions pkg/controllers/nodeclaim/disruption/consolidation_test.go
@@ -50,9 +50,6 @@ var _ = Describe("Underutilized", func() {
ExpectApplied(ctx, env.Client, nodeClaim, nodePool)
})
It("should ignore NodeClaims not managed by this instance of Karpenter", func() {
// nodePool := test.NodePool()
// nodePool.Spec.Disruption.ConsolidationPolicy = v1.ConsolidationPolicyWhenEmptyOrUnderutilized
// nodePool.Spec.Disruption.ConsolidateAfter = v1.MustParseNillableDuration("1m")
unmanagedNodeClaim, _ := test.NodeClaimAndNode(v1.NodeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
@@ -62,8 +59,8 @@ var _ = Describe("Underutilized", func() {
},
Spec: v1.NodeClaimSpec{
NodeClassRef: &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
},
},
22 changes: 7 additions & 15 deletions pkg/controllers/nodeclaim/disruption/controller.go
@@ -35,7 +35,7 @@ import (
v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
"sigs.k8s.io/karpenter/pkg/cloudprovider"
"sigs.k8s.io/karpenter/pkg/operator/injection"
nodeclaimutil "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
"sigs.k8s.io/karpenter/pkg/utils/result"
)

@@ -68,7 +68,7 @@ func NewController(clk clock.Clock, kubeClient client.Client, cloudProvider clou
func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (reconcile.Result, error) {
ctx = injection.WithControllerName(ctx, "nodeclaim.disruption")

if !nodeclaimutil.IsManaged(nodeClaim, c.cloudProvider) {
if !nodeclaimutils.IsManaged(nodeClaim, c.cloudProvider) {
return reconcile.Result{}, nil
}
if !nodeClaim.DeletionTimestamp.IsZero() {
@@ -115,21 +115,13 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (re
func (c *Controller) Register(_ context.Context, m manager.Manager) error {
b := controllerruntime.NewControllerManagedBy(m).
Named("nodeclaim.disruption").
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutil.IsManagedPredicates(c.cloudProvider))).
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicateFuncs(c.cloudProvider))).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}).
// Note: We don't use the ManagedFilter (NodeClaim) for NodePool updates because drift should be captured when
// updating a NodePool's NodeClassRef to an unsupported NodeClass. However, this is currently unsupported
// (enforced via CEL validation on the NodeClassRef).
Watches(
&v1.NodePool{},
nodeclaimutil.NodePoolEventHandler(c.kubeClient),
).
Watches(
&corev1.Pod{},
nodeclaimutil.PodEventHandler(c.kubeClient, nodeclaimutil.WithManagedFilter(c.cloudProvider)),
)
Watches(&v1.NodePool{}, nodeclaimutils.NodePoolEventHandler(c.kubeClient, c.cloudProvider)).
Watches(&corev1.Pod{}, nodeclaimutils.PodEventHandler(c.kubeClient, c.cloudProvider))

for _, nodeClass := range c.cloudProvider.GetSupportedNodeClasses() {
b.Watches(nodeClass, nodeclaimutil.NodeClassEventHandler(c.kubeClient))
b.Watches(nodeClass, nodeclaimutils.NodeClassEventHandler(c.kubeClient))
}
return b.Complete(reconcile.AsReconciler(m.GetClient(), c))
}
4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/disruption/drift_test.go
@@ -64,8 +64,8 @@ var _ = Describe("Drift", func() {
cp.Drifted = "drifted"
if !isNodeClaimManaged {
nodeClaim.Spec.NodeClassRef = &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
}
}
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/expiration/controller.go
@@ -89,6 +89,6 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim) (re
func (c *Controller) Register(_ context.Context, m manager.Manager) error {
return controllerruntime.NewControllerManagedBy(m).
Named("nodeclaim.expiration").
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicates(c.cloudProvider))).
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicateFuncs(c.cloudProvider))).
Complete(reconcile.AsReconciler(m.GetClient(), c))
}
4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/expiration/suite_test.go
@@ -124,8 +124,8 @@ var _ = Describe("Expiration", func() {
nodeClaim.Spec.ExpireAfter = v1.MustParseNillableDuration("30s")
if !isNodeClaimManaged {
nodeClaim.Spec.NodeClassRef = &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
}
}
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/garbagecollection/controller.go
@@ -59,7 +59,7 @@ func NewController(c clock.Clock, kubeClient client.Client, cloudProvider cloudp
func (c *Controller) Reconcile(ctx context.Context) (reconcile.Result, error) {
ctx = injection.WithControllerName(ctx, "nodeclaim.garbagecollection")

nodeClaims, err := nodeclaimutils.List(ctx, c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider))
nodeClaims, err := nodeclaimutils.ListManaged(ctx, c.kubeClient, c.cloudProvider)
if err != nil {
return reconcile.Result{}, err
}
4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/controller.go
@@ -87,10 +87,10 @@ func NewController(clk clock.Clock, kubeClient client.Client, cloudProvider clou
func (c *Controller) Register(_ context.Context, m manager.Manager) error {
return controllerruntime.NewControllerManagedBy(m).
Named(c.Name()).
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicates(c.cloudProvider))).
For(&v1.NodeClaim{}, builder.WithPredicates(nodeclaimutils.IsManagedPredicateFuncs(c.cloudProvider))).
Watches(
&corev1.Node{},
nodeclaimutils.NodeEventHandler(c.kubeClient, nodeclaimutils.WithManagedFilter(c.cloudProvider)),
nodeclaimutils.NodeEventHandler(c.kubeClient, c.cloudProvider),
).
WithOptions(controller.Options{
RateLimiter: workqueue.NewTypedMaxOfRateLimiter[reconcile.Request](
8 changes: 4 additions & 4 deletions pkg/controllers/nodeclaim/lifecycle/initialization.go
@@ -30,8 +30,8 @@ import (

v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
"sigs.k8s.io/karpenter/pkg/scheduling"
nodeutil "sigs.k8s.io/karpenter/pkg/utils/node"
nodeclaimutil "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
nodeutils "sigs.k8s.io/karpenter/pkg/utils/node"
nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
"sigs.k8s.io/karpenter/pkg/utils/resources"
)

@@ -52,13 +52,13 @@ func (i *Initialization) Reconcile(ctx context.Context, nodeClaim *v1.NodeClaim)
return reconcile.Result{}, nil
}
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("provider-id", nodeClaim.Status.ProviderID))
node, err := nodeclaimutil.NodeForNodeClaim(ctx, i.kubeClient, nodeClaim)
node, err := nodeclaimutils.NodeForNodeClaim(ctx, i.kubeClient, nodeClaim)
if err != nil {
nodeClaim.StatusConditions().SetUnknownWithReason(v1.ConditionTypeInitialized, "NodeNotFound", "Node not registered with cluster")
return reconcile.Result{}, nil //nolint:nilerr
}
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("Node", klog.KRef("", node.Name)))
if nodeutil.GetCondition(node, corev1.NodeReady).Status != corev1.ConditionTrue {
if nodeutils.GetCondition(node, corev1.NodeReady).Status != corev1.ConditionTrue {
nodeClaim.StatusConditions().SetUnknownWithReason(v1.ConditionTypeInitialized, "NodeNotReady", "Node status is NotReady")
return reconcile.Result{}, nil
}
4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/initialization_test.go
@@ -59,8 +59,8 @@ var _ = Describe("Initialization", func() {
nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
Spec: v1.NodeClaimSpec{
NodeClassRef: &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
},
},
4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/launch_test.go
@@ -49,8 +49,8 @@ var _ = Describe("Launch", func() {
nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
Spec: v1.NodeClaimSpec{
NodeClassRef: &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
},
},
4 changes: 2 additions & 2 deletions pkg/controllers/nodeclaim/lifecycle/liveness_test.go
@@ -60,8 +60,8 @@ var _ = Describe("Liveness", func() {
nodeClaimOpts = append(nodeClaimOpts, v1.NodeClaim{
Spec: v1.NodeClaimSpec{
NodeClassRef: &v1.NodeClassReference{
Group: "karpenter.k8s.aws",
Kind: "EC2NodeClass",
Group: "karpenter.test.sh",
Kind: "UnmanagedNodeClass",
Name: "default",
},
},