Add Controller Revision (Implementation of KEP #238) #277

Merged: 27 commits, Dec 28, 2024

Changes from 4 commits

Commits (27)
db8a7c5
initial working prototype of controller revision
Edwinhr716 Nov 8, 2024
df4872b
update value of currentRevision after update is done
Edwinhr716 Nov 15, 2024
de9dd70
switched names from sts to lws
Edwinhr716 Nov 15, 2024
267af37
test changes
Edwinhr716 Nov 27, 2024
7389ea1
added unit, integration, and e2e tests
Edwinhr716 Dec 5, 2024
5a49f88
cleanup and added integration test for collisionCount
Edwinhr716 Dec 6, 2024
f28995a
fix lint
Edwinhr716 Dec 6, 2024
1f5adfa
changed from string to const on labels
Edwinhr716 Dec 9, 2024
1b978cf
changed revision logic based on updated design
Edwinhr716 Dec 16, 2024
522f1b0
removed status changes, cleaned up tests that referenced them. Implem…
Edwinhr716 Dec 20, 2024
9436aeb
addressed comments, refactored
Edwinhr716 Dec 23, 2024
8d3da19
rebased
Edwinhr716 Dec 23, 2024
5fa3b34
fixed failing tests and lint error
Edwinhr716 Dec 23, 2024
6a29f29
fixed lint, again
Edwinhr716 Dec 23, 2024
e9d6131
refactored revision code, added fix for PodGroupRestart bug and an e2…
Edwinhr716 Dec 26, 2024
7a19d85
addressed third round of comments
Edwinhr716 Dec 27, 2024
8b66d18
removed blank space
Edwinhr716 Dec 27, 2024
4d2ec47
addressed comments, round 4
Edwinhr716 Dec 27, 2024
949bc68
further changes from templateHash to RevisionKey
Edwinhr716 Dec 27, 2024
6b292d7
further changing from templateHash to revisionKey
Edwinhr716 Dec 28, 2024
a065e93
minor fixes
Edwinhr716 Dec 28, 2024
8b62baa
fixed all tests failing, still need to debug other tests
Edwinhr716 Dec 28, 2024
897e009
added log messages to listRevision for debugging
Edwinhr716 Dec 28, 2024
7822c18
fixed bug with getHighestRevision
Edwinhr716 Dec 28, 2024
cafadfc
added log messages for create revision function
Edwinhr716 Dec 28, 2024
a856b3d
removed fetch after creation, not needed
Edwinhr716 Dec 28, 2024
1faace7
removed e2e tests, will be added as integration tests intead
Edwinhr716 Dec 28, 2024
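
At a high level, the PR serializes the LeaderWorkerSet template, hashes it, persists the serialized form as a ControllerRevision, and later decides whether a rollout is needed by comparing the live spec against the spec reconstructed from the stored revision. The following is a minimal, stdlib-only sketch of that flow, not the PR's actual helpers (those live in pkg/utils/revision); all names, types, and values in it are illustrative.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// template stands in for the LeaderWorkerSet leader/worker template spec.
type template struct {
	Image string `json:"image"`
	Size  int    `json:"size"`
}

// hashOf returns a deterministic key for a template, analogous to the
// revision key the controller stamps on the leader StatefulSet.
func hashOf(t template) string {
	raw, _ := json.Marshal(t) // deterministic field order for a fixed struct
	sum := sha256.Sum256(raw)
	return hex.EncodeToString(sum[:8])
}

// revisionStore models ControllerRevisions: serialized templates keyed by hash.
type revisionStore map[string][]byte

// create persists the template under its hash if no revision exists yet.
func (s revisionStore) create(t template) string {
	key := hashOf(t)
	if _, ok := s[key]; !ok {
		raw, _ := json.Marshal(t)
		s[key] = raw
	}
	return key
}

// updated reports whether the live template differs from the stored baseline,
// mirroring what leaderWorkerSetUpdated does in the diff below.
func (s revisionStore) updated(key string, live template) (bool, error) {
	raw, ok := s[key]
	if !ok {
		return false, fmt.Errorf("no revision for key %s", key)
	}
	var baseline template
	if err := json.Unmarshal(raw, &baseline); err != nil {
		return false, err
	}
	return baseline != live, nil
}

func main() {
	store := revisionStore{}
	v1 := template{Image: "vllm:v1", Size: 4}
	key := store.create(v1) // revision written on first reconcile

	changed, _ := store.updated(key, v1)
	fmt.Println("updated after no-op:", changed) // false

	changed, _ = store.updated(key, template{Image: "vllm:v2", Size: 4})
	fmt.Println("updated after image bump:", changed) // true
}
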
pkg/controllers/leaderworkerset_controller.go (98 additions, 84 deletions)
@@ -44,6 +44,7 @@ import (
"sigs.k8s.io/lws/pkg/utils"
controllerutils "sigs.k8s.io/lws/pkg/utils/controller"
podutils "sigs.k8s.io/lws/pkg/utils/pod"
revisionutils "sigs.k8s.io/lws/pkg/utils/revision"
statefulsetutils "sigs.k8s.io/lws/pkg/utils/statefulset"
)

@@ -99,13 +100,31 @@ func (r *LeaderWorkerSetReconciler) Reconcile(ctx context.Context, req ctrl.Requ
log := ctrl.LoggerFrom(ctx).WithValues("leaderworkerset", klog.KObj(lws))
ctx = ctrl.LoggerInto(ctx, log)

partition, replicas, err := r.rollingUpdateParameters(ctx, lws)
leaderSts, err := r.getLeaderStatefulSet(ctx, lws)
if err != nil {
log.Error(err, "Fetching leader statefulset")
return ctrl.Result{}, err
}

if err := r.createControllerRevisionIfNonExist(ctx, leaderSts, lws); err != nil {
log.Error(err, "Creating controller revision")
return ctrl.Result{}, err
}

lwsUpdated, err := r.leaderWorkerSetUpdated(ctx, leaderSts, lws)
if err != nil {
log.Error(err, "Validating if LWS has been updated")
return ctrl.Result{}, err
}

templateHash := getLeaderWorkerTemplateHash(leaderSts, lws, lwsUpdated)
partition, replicas, err := r.rollingUpdateParameters(ctx, lws, leaderSts, lwsUpdated)
if err != nil {
log.Error(err, "Rolling partition error")
return ctrl.Result{}, err
}

if err := r.SSAWithStatefulset(ctx, lws, partition, replicas); err != nil {
if err := r.SSAWithStatefulset(ctx, lws, partition, replicas, templateHash); err != nil {
return ctrl.Result{}, err
}

@@ -117,7 +136,7 @@ func (r *LeaderWorkerSetReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, err
}

err = r.updateStatus(ctx, lws)
err = r.updateStatus(ctx, lws, templateHash)
if err != nil {
return ctrl.Result{}, err
}
@@ -193,34 +212,15 @@ func SetupIndexes(indexer client.FieldIndexer) error {
// - Otherwise, Replicas is equal to spec.Replicas
// - One exception here is when unready replicas of leaderWorkerSet is equal to MaxSurge,
// we should reclaim the extra replicas gradually to accommodate for the new replicas.
func (r *LeaderWorkerSetReconciler) rollingUpdateParameters(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet) (int32, int32, error) {
func (r *LeaderWorkerSetReconciler) rollingUpdateParameters(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet, sts *appsv1.StatefulSet, leaderWorkerSetUpdated bool) (int32, int32, error) {
log := ctrl.LoggerFrom(ctx).WithValues("leaderworkerset", klog.KObj(lws))
ctx = ctrl.LoggerInto(ctx, log)
lwsReplicas := *lws.Spec.Replicas

// Case 1:
// If sts not created yet, all partitions should be updated,
// replicas should not change.
stsExists, sts, err := stsCreated(ctx, r.Client, lws)
if err != nil {
return 0, 0, err
}

if !stsExists {
return 0, lwsReplicas, nil
}

existingControllerRevisions, err := controllerutils.ExistingControllerRevisions(ctx, r.Client, lws)
if err != nil {
return 0, 0, err
}

if !existingControllerRevisions {
// Updating from version that did not support Controller Revision. Need to create one first before checking if template has been updated
log.V(2).Info(fmt.Sprintf("Creating new controller revision create/update operation for %+v ", lws))
if err := controllerutils.CreateLeaderWorkerSetRevision(ctx, r.Client, lws, sts.Labels[leaderworkerset.TemplateRevisionHashKey]); err != nil {
return 0, 0, nil
}
if sts == nil {
return 0, lwsReplicas, nil
}

@@ -248,11 +248,7 @@ func (r *LeaderWorkerSetReconciler) rollingUpdateParameters(ctx context.Context,

// Case 2:
// Indicates a new rolling update here.
hasTemplateUdated, err := templateUpdated(ctx, r.Client, sts, lws)
if err != nil {
return 0, 0, err
}
if hasTemplateUdated {
if leaderWorkerSetUpdated {
// Processing scaling up/down first prior to rolling update.
return min(lwsReplicas, stsReplicas), wantReplicas(lwsReplicas), nil
}
@@ -297,29 +293,10 @@ func (r *LeaderWorkerSetReconciler) rollingUpdateParameters(ctx context.Context,
return min(partition, utils.NonZeroValue(stsReplicas-int32(rollingStep)-continuousReadyReplicas)), wantReplicas(lwsUnreadyReplicas), nil
}
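
To make the partition and replica arithmetic concrete, here is a rough model of the "Case 2" branch above with illustrative numbers; wantReplicas (which accounts for maxSurge) is simplified to the spec replica count, so treat this as a sketch of the idea rather than the controller's exact behavior.

package main

import "fmt"

// rollingUpdateParams models "Case 2": when the template changed, scaling is
// processed before the rolling update by pinning the partition at the current
// replica boundary. Uses the built-in min (Go 1.21+).
func rollingUpdateParams(lwsReplicas, stsReplicas int32, updated bool) (partition, replicas int32) {
	if updated {
		// Nothing below min(lws, sts) is rolled yet; scale first.
		return min(lwsReplicas, stsReplicas), lwsReplicas
	}
	return 0, lwsReplicas
}

func main() {
	// Scale 4 -> 6 while also updating the template:
	p, r := rollingUpdateParams(6, 4, true)
	fmt.Println(p, r) // 4 6: create the two new groups first, then roll 3..0 down
}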

func (r *LeaderWorkerSetReconciler) SSAWithStatefulset(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet, partition, replicas int32) error {
func (r *LeaderWorkerSetReconciler) SSAWithStatefulset(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet, partition, replicas int32, templateHash string) error {
log := ctrl.LoggerFrom(ctx)

// templateHash is not a reliable way to determine whether or not an lws object has been updated as seen in
// https://github.com/kubernetes-sigs/lws/issues/281
// If a leader sts already exists, but the template has not been updated, the templateHash of the leader is
// used to keep consistency in cases where two different templateHashes are calculated from the same LWS object
stsExists, sts, err := stsCreated(ctx, r.Client, lws)
if err != nil {
return err
}
templateHash := utils.LeaderWorkerTemplateHash(lws)
if stsExists {
templateUpdated, err := templateUpdated(ctx, r.Client, sts, lws)
if err != nil {
return err
}
if !templateUpdated {
templateHash = sts.Labels[leaderworkerset.TemplateRevisionHashKey]
}
}

if err = controllerutils.CreateLeaderWorkerSetRevision(ctx, r.Client, lws, templateHash); err != nil {
if err := revisionutils.CreateLeaderWorkerSetRevision(ctx, r.Client, lws, templateHash); err != nil {
log.Error(err, "Creating LWS Revision")
return err
}
@@ -359,7 +336,7 @@ func (r *LeaderWorkerSetReconciler) SSAWithStatefulset(ctx context.Context, lws
}

// updates the condition of the leaderworkerset to either Progressing or Available.
func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet) (bool, error) {
func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet, templateHash string) (bool, error) {
log := ctrl.LoggerFrom(ctx)
podSelector := client.MatchingLabels(map[string]string{
leaderworkerset.SetNameLabelKey: lws.Name,
@@ -373,7 +350,6 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l

updateStatus := false
readyCount, updatedCount, updatedNonBurstWorkerCount, currentNonBurstWorkerCount, updatedAndReadyCount := 0, 0, 0, 0, 0
templateHash := utils.LeaderWorkerTemplateHash(lws)
noWorkerSts := *lws.Spec.LeaderWorkerTemplate.Size == 1

// Iterate through all leaderPods.
@@ -434,7 +410,9 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l
conditions = append(conditions, makeCondition(leaderworkerset.LeaderWorkerSetUpgradeInProgress))
} else if updatedAndReadyCount == int(*lws.Spec.Replicas) {
conditions = append(conditions, makeCondition(leaderworkerset.LeaderWorkerSetAvailable))
controllerutils.TruncateHistory(ctx, r.Client, lws, templateHash)
if err := revisionutils.TruncateHistory(ctx, r.Client, lws, templateHash); err != nil {
Review comment (Contributor): As a follow-up, we can store the current and updated revisions for debugging purposes. The current one can be set here, while the updated one can be set in the if block above.
return false, err
}
} else {
conditions = append(conditions, makeCondition(leaderworkerset.LeaderWorkerSetProgressing))
}
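
The review comment above suggests surfacing revision identity in status. A hypothetical shape for that follow-up is sketched here; these fields are not part of the LWS API in this PR, and the names are invented for illustration.

package lwsdebug

// RevisionDebugStatus is a hypothetical set of status fields for the
// reviewer's follow-up suggestion; nothing like it exists in this PR.
type RevisionDebugStatus struct {
	// CurrentRevisionKey would be set where TruncateHistory runs above,
	// once the set becomes Available.
	CurrentRevisionKey string
	// UpdateRevisionKey would be set in the upgrade-in-progress branch.
	UpdateRevisionKey string
}
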
@@ -448,7 +426,7 @@ func (r *LeaderWorkerSetReconciler) updateConditions(ctx context.Context, lws *l
}

// Updates status and condition of LeaderWorkerSet and returns whether or not an update actually occurred.
func (r *LeaderWorkerSetReconciler) updateStatus(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet) error {
func (r *LeaderWorkerSetReconciler) updateStatus(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet, templateHash string) error {
updateStatus := false
log := ctrl.LoggerFrom(ctx)

@@ -484,7 +462,7 @@ func (r *LeaderWorkerSetReconciler) updateStatus(ctx context.Context, lws *leade
}

// check if an update is needed
updateConditions, err := r.updateConditions(ctx, lws)
updateConditions, err := r.updateConditions(ctx, lws, templateHash)
if err != nil {
return err
}
@@ -529,7 +507,7 @@ func (r *LeaderWorkerSetReconciler) iterateReplicas(ctx context.Context, lws *le
return strconv.Atoi(sts.Labels[leaderworkerset.GroupIndexLabelKey])
}, stsList.Items, int(stsReplicas))

templateHash := utils.LeaderWorkerTemplateHash(lws)
templateHash := revisionutils.LeaderWorkerTemplateHash(lws)
// Once size==1, no worker statefulSets will be created.
noWorkerSts := *lws.Spec.LeaderWorkerTemplate.Size == 1
processReplica := func(index int32) (ready bool) {
@@ -571,6 +549,70 @@ func (r *LeaderWorkerSetReconciler) iterateReplicas(ctx context.Context, lws *le
return continuousReadyReplicas, lwsUnreadyReplicas, nil
}

func (r *LeaderWorkerSetReconciler) getLeaderStatefulSet(ctx context.Context, lws *leaderworkerset.LeaderWorkerSet) (*appsv1.StatefulSet, error) {
sts := &appsv1.StatefulSet{}
err := r.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, sts)
if err != nil {
if apierrors.IsNotFound(err) {
return nil, nil
}
return nil, err
}

return sts, nil
}

// Creates a Controller Revision if the leader statefulset exists but no revisions have been created yet. This happens when updating from a version that doesn't
// support controller revision
func (r *LeaderWorkerSetReconciler) createControllerRevisionIfNonExist(ctx context.Context, sts *appsv1.StatefulSet, lws *leaderworkerset.LeaderWorkerSet) error {

if sts == nil {
return nil
}

existingControllerRevisions, err := revisionutils.ExistingControllerRevisions(ctx, r.Client, lws)
if err != nil {
return err
}

if !existingControllerRevisions {
return revisionutils.CreateLeaderWorkerSetRevision(ctx, r.Client, lws, sts.Labels[leaderworkerset.TemplateRevisionHashKey])
}

return nil
}
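
A concrete timeline may help for the upgrade path this function handles; the hash value is illustrative and the map stands in for revisions stored in the API server.

package main

import "fmt"

func main() {
	revisions := map[string]bool{} // existing ControllerRevisions by key
	stsHash := "hash-A"            // label stamped by a pre-revision controller build

	// First reconcile after upgrading the controller: the leader sts exists,
	// but no revision does, so one is created from the sts label rather than
	// from a freshly computed hash (which might differ, see issue #281).
	if !revisions[stsHash] {
		revisions[stsHash] = true
		fmt.Println("created revision for", stsHash)
	}
}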

func (r *LeaderWorkerSetReconciler) leaderWorkerSetUpdated(ctx context.Context, sts *appsv1.StatefulSet, lws *leaderworkerset.LeaderWorkerSet) (bool, error) {

if sts == nil {
return false, nil
}

controllerRevision, err := revisionutils.GetLeaderWorkerSetRevisionFromTemplateHash(ctx, r.Client, lws, sts.Labels[leaderworkerset.TemplateRevisionHashKey])
if err != nil {
return false, err
}
baselineLws, err := revisionutils.ApplyRevision(lws, controllerRevision)
if err != nil {
return false, err
}
return !revisionutils.EqualLeaderWorkerTemplates(baselineLws, lws), nil
}

// templateHash is not a reliable way to determine whether or not an lws object has been updated as seen in https://github.com/kubernetes-sigs/lws/issues/281
// If a leader sts already exists, but the template has not been updated, the templateHash of the leader is used to keep consistency in cases where two
// different templateHashes are calculated from the same LWS object
func getLeaderWorkerTemplateHash(sts *appsv1.StatefulSet, lws *leaderworkerset.LeaderWorkerSet, leaderWorkerSetUpdated bool) string {

if sts != nil && !leaderWorkerSetUpdated {
return sts.Labels[leaderworkerset.TemplateRevisionHashKey]
}

return revisionutils.LeaderWorkerTemplateHash(lws)
}
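
A quick illustration of why this function prefers the existing label (issue #281): if hashing the same spec can yield different strings across controller builds, recomputing on every reconcile would look like a spurious update. A runnable toy version, with the Kubernetes types replaced by plain values:

package main

import "fmt"

// getTemplateHash mirrors getLeaderWorkerTemplateHash above in miniature.
func getTemplateHash(stsExists, updated bool, stsHash, computed string) string {
	if stsExists && !updated {
		return stsHash // keep the hash the leader sts was created with
	}
	return computed
}

func main() {
	// A newer build computes "hash-B" for the same unchanged spec that an
	// older build stamped as "hash-A".
	fmt.Println(getTemplateHash(true, false, "hash-A", "hash-B")) // hash-A: no spurious rollout
	fmt.Println(getTemplateHash(true, true, "hash-A", "hash-B"))  // hash-B: genuine update
}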

// constructLeaderStatefulSetApplyConfiguration constructs the applied configuration for the leader StatefulSet
func constructLeaderStatefulSetApplyConfiguration(lws *leaderworkerset.LeaderWorkerSet, partition, replicas int32, templateHash string) (*appsapplyv1.StatefulSetApplyConfiguration, error) {
var podTemplateSpec corev1.PodTemplateSpec
@@ -721,31 +763,3 @@ func exclusiveConditionTypes(condition1 metav1.Condition, condition2 metav1.Cond

return false
}

func templateUpdated(ctx context.Context, k8sClient client.Client, sts *appsv1.StatefulSet, lws *leaderworkerset.LeaderWorkerSet) (bool, error) {
log := ctrl.LoggerFrom(ctx).WithValues("leaderworkerset", klog.KObj(lws))
ctx = ctrl.LoggerInto(ctx, log)
controllerRevision, err := controllerutils.GetLeaderWorkerSetRevisionFromTemplateHash(ctx, k8sClient, lws, sts.Labels[leaderworkerset.TemplateRevisionHashKey])
if err != nil {
return false, err
}

baselineLws, err := controllerutils.ApplyRevision(lws, controllerRevision)
if err != nil {
return false, err
}
return !utils.EqualLeaderWorkerTemplates(baselineLws, lws), nil
}

func stsCreated(ctx context.Context, k8sClient client.Client, lws *leaderworkerset.LeaderWorkerSet) (bool, *appsv1.StatefulSet, error) {
sts := &appsv1.StatefulSet{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, sts)
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil, nil
}
return false, nil, err
}

return true, sts, nil
}
pkg/controllers/leaderworkerset_controller_test.go (16 additions, 9 deletions)
@@ -30,24 +30,27 @@ import (
"k8s.io/utils/ptr"

leaderworkerset "sigs.k8s.io/lws/api/leaderworkerset/v1"
"sigs.k8s.io/lws/pkg/utils"

revisionutils "sigs.k8s.io/lws/pkg/utils/revision"
testutils "sigs.k8s.io/lws/test/testutils"
)

func TestLeaderStatefulSetApplyConfig(t *testing.T) {
hash1 := utils.LeaderWorkerTemplateHash(testutils.BuildBasicLeaderWorkerSet("test-sample", "default").
hash1 := revisionutils.LeaderWorkerTemplateHash(testutils.BuildBasicLeaderWorkerSet("test-sample", "default").
LeaderTemplateSpec(testutils.MakeLeaderPodSpec()).
WorkerTemplateSpec(testutils.MakeWorkerPodSpec()).Obj())
hash2 := utils.LeaderWorkerTemplateHash(testutils.BuildBasicLeaderWorkerSet("test-sample", "default").
hash2 := revisionutils.LeaderWorkerTemplateHash(testutils.BuildBasicLeaderWorkerSet("test-sample", "default").
WorkerTemplateSpec(testutils.MakeWorkerPodSpec()).Obj())

tests := []struct {
name string
templateHash string
lws *leaderworkerset.LeaderWorkerSet
wantApplyConfig *appsapplyv1.StatefulSetApplyConfiguration
}{
{
name: "1 replica, size 1, with empty leader template, exclusive placement disabled",
name: "1 replica, size 1, with empty leader template, exclusive placement disabled",
templateHash: hash2,
lws: testutils.BuildBasicLeaderWorkerSet("test-sample", "default").
Replica(1).
RolloutStrategy(leaderworkerset.RolloutStrategy{
@@ -112,7 +115,8 @@ func TestLeaderStatefulSetApplyConfig(t *testing.T) {
},
},
{
name: "1 replica, size 2 , with empty leader template, exclusive placement enabled",
name: "1 replica, size 2 , with empty leader template, exclusive placement enabled",
templateHash: hash2,
lws: testutils.BuildBasicLeaderWorkerSet("test-sample", "default").
Annotation(map[string]string{
"leaderworkerset.sigs.k8s.io/exclusive-topology": "topologyKey",
@@ -180,7 +184,8 @@ func TestLeaderStatefulSetApplyConfig(t *testing.T) {
},
},
{
name: "2 replica, size 2, with leader template, exclusive placement enabled",
name: "2 replica, size 2, with leader template, exclusive placement enabled",
templateHash: hash1,
lws: testutils.BuildBasicLeaderWorkerSet("test-sample", "default").Annotation(map[string]string{
"leaderworkerset.sigs.k8s.io/exclusive-topology": "topologyKey",
}).Replica(2).
@@ -247,7 +252,8 @@ func TestLeaderStatefulSetApplyConfig(t *testing.T) {
},
},
{
name: "2 maxUnavailable, 1 maxSurge, with empty leader template, exclusive placement disabled",
name: "2 maxUnavailable, 1 maxSurge, with empty leader template, exclusive placement disabled",
templateHash: hash2,
lws: testutils.BuildBasicLeaderWorkerSet("test-sample", "default").
Replica(1).
RolloutStrategy(leaderworkerset.RolloutStrategy{
@@ -313,7 +319,8 @@ func TestLeaderStatefulSetApplyConfig(t *testing.T) {
},
},
{
name: "1 replica, size 2, with leader template, exclusive placement enabled, subgroupsize enabled",
name: "1 replica, size 2, with leader template, exclusive placement enabled, subgroupsize enabled",
templateHash: hash1,
lws: testutils.BuildBasicLeaderWorkerSet("test-sample", "default").Annotation(map[string]string{
leaderworkerset.SubGroupExclusiveKeyAnnotationKey: "topologyKey",
}).SubGroupSize(2).Replica(1).
@@ -383,7 +390,7 @@ func TestLeaderStatefulSetApplyConfig(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
stsApplyConfig, err := constructLeaderStatefulSetApplyConfiguration(tc.lws, 0, *tc.lws.Spec.Replicas, "")
stsApplyConfig, err := constructLeaderStatefulSetApplyConfiguration(tc.lws, 0, *tc.lws.Spec.Replicas, tc.templateHash)
if err != nil {
t.Errorf("failed with error: %s", err.Error())
}
pkg/controllers/pod_controller.go (3 additions, 2 deletions)
@@ -41,6 +41,7 @@ import (
acceleratorutils "sigs.k8s.io/lws/pkg/utils/accelerators"
controllerutils "sigs.k8s.io/lws/pkg/utils/controller"
podutils "sigs.k8s.io/lws/pkg/utils/pod"
revisionutils "sigs.k8s.io/lws/pkg/utils/revision"
statefulsetutils "sigs.k8s.io/lws/pkg/utils/statefulset"
)

@@ -118,7 +119,7 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R
log.V(2).Info("defer the creation of the worker statefulset because leader pod is not ready.")
return ctrl.Result{}, nil
}
currentRevision, err := controllerutils.GetLeaderWorkerSetRevisionFromTemplateHash(ctx, r.Client, &leaderWorkerSet, pod.Labels[leaderworkerset.TemplateRevisionHashKey])
currentRevision, err := revisionutils.GetLeaderWorkerSetRevisionFromTemplateHash(ctx, r.Client, &leaderWorkerSet, pod.Labels[leaderworkerset.TemplateRevisionHashKey])
if err != nil {
log.Error(err, "Getting lws revisions")
return ctrl.Result{}, err
@@ -264,7 +265,7 @@ func setControllerReferenceWithStatefulSet(owner metav1.Object, sts *appsapplyv1

// constructWorkerStatefulSetApplyConfiguration constructs the applied configuration for the worker StatefulSet
func constructWorkerStatefulSetApplyConfiguration(leaderPod corev1.Pod, lws leaderworkerset.LeaderWorkerSet, currentRevision *appsv1.ControllerRevision) (*appsapplyv1.StatefulSetApplyConfiguration, error) {
currentLws, err := controllerutils.ApplyRevision(&lws, currentRevision)
currentLws, err := revisionutils.ApplyRevision(&lws, currentRevision)
if err != nil {
return nil, err
}
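
One more note on the pod controller change above: the worker StatefulSet is built from the LWS reconstructed out of the leader pod's revision rather than from the live spec, so groups that have not been rolled yet keep the old template while a rolling update is in flight. A toy model of that lookup, with invented values:

package main

import "fmt"

// workerTemplate models the pod controller's choice: pick the template that
// matches the leader pod's revision key, not the live (possibly newer) spec.
func workerTemplate(revisions map[string]string, leaderKey string) string {
	return revisions[leaderKey]
}

func main() {
	revisions := map[string]string{"hash-A": "image:v1", "hash-B": "image:v2"}
	// Mid-rollout: one leader still carries hash-A, another already hash-B.
	fmt.Println(workerTemplate(revisions, "hash-A")) // image:v1
	fmt.Println(workerTemplate(revisions, "hash-B")) // image:v2
}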