---
title: "Implementing Kubernetes ConfigMap Revisions with Pulumi and Argo Rollouts"
h1: "How we implemented ConfigMap revisions in Kubernetes using Pulumi and Argo Rollouts"
date: "2026-01-13"
meta_desc: "Learn how to implement ConfigMap revisions in Kubernetes using Pulumi's ConfigMapPatch and owner references with ReplicaSets to leverage Kubernetes garbage collection."
meta_image: meta.png
authors: ["matan-baruch"]
tags: ["kubernetes", "pulumi", "configmap", "argo-rollouts", "canary-deployment"]
---

ConfigMaps in Kubernetes don't have built-in revision support,
which can create challenges when deploying applications with canary strategies.
When using Argo Rollouts with AWS Spot instances,
ConfigMap deletions during canary deployments can cause older pods to fail when they try to reload configuration.
We solved this by implementing a custom ConfigMap revision system
using Pulumi's ConfigMapPatch and Kubernetes owner references.

<!--more-->

## The problem

When deploying applications to Kubernetes using canary strategies with Argo Rollouts,
we encountered a specific challenge:

1. **Pulumi ConfigMap replacement behavior**: By default, when a ConfigMap’s data changes, Pulumi may replace it rather than update it in place, which for auto-named ConfigMaps results in a new generated name (suffix). See the sketch after this list.
2. **Canary deployment issues**: During canary deployments, the old ConfigMap gets deleted, and older pods (especially on AWS Spot instances that can be replaced mid-canary) may fail to reload their configuration.
3. **No native revision support**: Neither Kubernetes nor Pulumi natively supports ConfigMap revisions the way they do for Deployments.

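To make the first point concrete, here is a minimal sketch of the replacement behavior, assuming an auto-named ConfigMap (the resource name and data are illustrative):

```typescript
import * as k8s from "@pulumi/kubernetes";

// Illustrative only: no metadata.name is set, so Pulumi auto-names the
// ConfigMap with a generated suffix (for example "app-config-1a2b3c4").
// Changing `data` later triggers a replacement, which creates a ConfigMap
// under a new generated name and deletes the old one. That deletion is what
// breaks older pods during a canary rollout.
const appConfig = new k8s.core.v1.ConfigMap("app-config", {
    data: {
        "app.properties": "feature.flag=true",
    },
});

// The generated name changes on every replacement.
export const configMapName = appConfig.metadata.name;
```
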
## The solution: ConfigMap revisions with owner references

Our solution leverages Kubernetes' garbage collection mechanism
by using owner references to tie ConfigMaps to ReplicaSets created during canary deployments.

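Concretely, each owner reference we attach to the ConfigMap has the following shape (the ReplicaSet name and UID below are placeholders; the real values are read from the live ReplicaSets):

```typescript
// Sketch of the owner reference added to the ConfigMap's metadata.
// With controller and blockOwnerDeletion set to false, the ReplicaSet does
// not manage the ConfigMap; it only "owns" it for garbage-collection
// purposes, so the ConfigMap is removed once all of its owners are gone.
const ownerReference = {
    apiVersion: "apps/v1",
    kind: "ReplicaSet",
    name: "my-app-7d4b9c6f5d",                   // placeholder ReplicaSet name
    uid: "00000000-0000-0000-0000-000000000000", // placeholder UID
    controller: false,
    blockOwnerDeletion: false,
};
```
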
### Key components

1. **Pulumi's ConfigMapPatch**: Patches existing ConfigMaps with owner references
2. **ReplicaSet Owner References**: Links ConfigMaps to ReplicaSets for automatic cleanup
3. **Kubernetes Garbage Collection**: Automatically cleans up ConfigMaps when ReplicaSets are deleted
4. **Retain on Delete**: Protects ConfigMaps from immediate deletion during Pulumi updates (see the sketch after this list)

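The last component is Pulumi's standard `retainOnDelete` resource option, which tells Pulumi to leave the live object in the cluster instead of deleting it. The implementation below sets it on the `ConfigMapPatch`; this minimal sketch (names and data are illustrative) shows the same option applied directly to a ConfigMap:

```typescript
import * as k8s from "@pulumi/kubernetes";

// Illustrative only: retainOnDelete keeps the live ConfigMap in the cluster
// when Pulumi would otherwise delete it (for example, the old object left
// behind after a replacement). The owner references patched in later make
// Kubernetes garbage collection responsible for removing it.
const appConfig = new k8s.core.v1.ConfigMap("app-config", {
    data: { "app.properties": "key=value" },
}, { retainOnDelete: true });
```
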
### Implementation

Here's how we implemented this solution in our rollout component:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as k8s from "@pulumi/kubernetes";
import * as k8sClient from "@kubernetes/client-node";

interface RolloutComponentArgs {
  namespace: string;
  configMapPatch?: boolean;
  kubeconfig: pulumi.Output<any>;
  configMapName: pulumi.Output<string>;
  rolloutSpec: k8s.types.input.apiextensions.CustomResourceArgs["spec"];
}

export class ConfigMapRevisionRollout extends pulumi.ComponentResource {
  public readonly rollout: k8s.apiextensions.CustomResource;

  constructor(
    name: string,
    args: RolloutComponentArgs,
    opts?: pulumi.ComponentResourceOptions
  ) {
    super("pulumi:component:ConfigMapRevisionRollout", name, {}, opts);

    // Create the Argo Rollout using CustomResource
    this.rollout = new k8s.apiextensions.CustomResource(
      `${name}-rollout`,
      {
        apiVersion: "argoproj.io/v1alpha1",
        kind: "Rollout",
        metadata: {
          name: name,
          namespace: args.namespace,
        },
        spec: args.rolloutSpec,
      },
      { parent: this, ...opts }
    );

    // Apply ConfigMap revision patching if enabled
    if (args.configMapPatch) {
      this.setupConfigMapRevisions(name, args);
    }

    this.registerOutputs({
      rollout: this.rollout,
    });
  }

  private setupConfigMapRevisions(name: string, args: RolloutComponentArgs): void {
    pulumi
      .all([args.kubeconfig, args.configMapName])
      .apply(async ([kubeconfig, configMapName]) => {
        try {
          // Create Server-Side Apply enabled provider
          const ssaProvider = new k8s.Provider(`${name}-ssa-provider`, {
            kubeconfig: JSON.stringify(kubeconfig),
            enableServerSideApply: true,
          });

          // Wait for rollout to stabilize and create ReplicaSets
          await this.waitForRolloutStabilization();

          // Get ReplicaSets associated with this rollout
          const replicaSets = await this.getAssociatedReplicaSets(
            args.namespace,
            configMapName,
            kubeconfig
          );

          if (replicaSets.length === 0) {
            pulumi.log.warn("No ReplicaSets found for ConfigMap patching");
            return;
          }

          // Create owner references for the ConfigMap
          const ownerReferences = replicaSets.map(rs => ({
            apiVersion: "apps/v1",
            kind: "ReplicaSet",
            name: rs.metadata?.name!,
            uid: rs.metadata?.uid!,
            controller: false,
            blockOwnerDeletion: false,
          }));

          // Patch the ConfigMap with owner references
          new k8s.core.v1.ConfigMapPatch(
            `${configMapName}-revision-patch`,
            {
              metadata: {
                name: configMapName,
                namespace: args.namespace,
                ownerReferences: ownerReferences,
                annotations: {
                  "pulumi.com/patchForce": "true",
                  "configmap.kubernetes.io/revision-managed": "true",
                },
              },
            },
            {
              provider: ssaProvider,
              retainOnDelete: true,
              parent: this,
            }
          );

          pulumi.log.info(
            `Successfully patched ConfigMap ${configMapName} with ${ownerReferences.length} owner references`
          );
        } catch (error) {
          pulumi.log.error(`Failed to setup ConfigMap revisions: ${error}`);
          throw error;
        }
      });
  }

  private async waitForRolloutStabilization(): Promise<void> {
    // Wait for rollout to create and stabilize ReplicaSets
    // In production, consider using a more sophisticated polling mechanism
    await new Promise(resolve => setTimeout(resolve, 10000));
  }

  private async getAssociatedReplicaSets(
    namespace: string,
    configMapName: string,
    kubeconfig: any
  ): Promise<k8sClient.V1ReplicaSet[]> {
    const kc = new k8sClient.KubeConfig();
    kc.loadFromString(JSON.stringify(kubeconfig));

    const appsV1Api = kc.makeApiClient(k8sClient.AppsV1Api);

    try {
      const response = await appsV1Api.listNamespacedReplicaSet(
        namespace,
        undefined, // pretty
        false, // allowWatchBookmarks
        undefined, // continue
        undefined, // fieldSelector
        `configMap=${configMapName}` // labelSelector
      );

      return response.body.items;
    } catch (error) {
      pulumi.log.error(`Failed to list ReplicaSets: ${error}`);
      return [];
    }
  }
}
```
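
The `waitForRolloutStabilization` helper above just sleeps for ten seconds. As its comment notes, a polling approach is more robust in production. Below is a rough sketch of what that could look like, reusing the same `@kubernetes/client-node` calls and label selector as `getAssociatedReplicaSets`; the timeout and interval values are assumptions, not part of the original implementation.

```typescript
import * as k8sClient from "@kubernetes/client-node";

// Sketch: poll until at least one ReplicaSet carrying the ConfigMap label
// exists, instead of sleeping for a fixed interval. Timeout and interval
// values are illustrative.
async function waitForReplicaSets(
    kc: k8sClient.KubeConfig,
    namespace: string,
    configMapName: string,
    timeoutMs = 120_000,
    intervalMs = 5_000
): Promise<void> {
    const appsV1Api = kc.makeApiClient(k8sClient.AppsV1Api);
    const deadline = Date.now() + timeoutMs;

    while (Date.now() < deadline) {
        const response = await appsV1Api.listNamespacedReplicaSet(
            namespace,
            undefined,                    // pretty
            false,                        // allowWatchBookmarks
            undefined,                    // continue
            undefined,                    // fieldSelector
            `configMap=${configMapName}`  // labelSelector
        );
        if (response.body.items.length > 0) {
            return;
        }
        await new Promise(resolve => setTimeout(resolve, intervalMs));
    }
    throw new Error(`Timed out waiting for ReplicaSets labeled configMap=${configMapName}`);
}
```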

## How it works

1. **Rollout Creation**: When a new rollout is created, Argo Rollouts generates new ReplicaSets for the canary deployment
2. **ConfigMap Patching**: Our code waits for the ReplicaSet creation, then patches the ConfigMap with owner references pointing to these ReplicaSets
3. **Garbage Collection**: Kubernetes automatically tracks the relationship between ConfigMaps and ReplicaSets
4. **Automatic Cleanup**: When ReplicaSets are cleaned up (based on the default revision history limit of 10), their associated ConfigMaps are also garbage collected (see the sketch after this list)

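In other words, how long old ConfigMaps stick around is governed by how many old ReplicaSets the Rollout keeps, which can be tuned with the Rollout's `revisionHistoryLimit` field. A minimal sketch (the value shown is only an example):

```typescript
// Sketch: the number of retained ReplicaSets, and therefore the number of
// retained ConfigMap "revisions", is controlled by revisionHistoryLimit on
// the Rollout spec. The default is 10; 5 here is only an example.
const rolloutSpec = {
    replicas: 3,
    revisionHistoryLimit: 5,
    // ...selector, template, and strategy omitted for brevity
};
```
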
## Benefits

- **Revision Control**: ConfigMaps now have revision-like behavior tied to ReplicaSet history
- **Automatic Cleanup**: No manual intervention needed for ConfigMap cleanup
- **Canary Safety**: Old ConfigMaps remain available during canary deployments until ReplicaSets are cleaned up
- **Spot Instance Resilience**: Pods that get replaced during canary deployments can still access their original ConfigMaps

## Configuration options

```typescript
interface RolloutComponentArgs {
  namespace: string;                     // Namespace for the Rollout and ConfigMap
  configMapPatch?: boolean;              // Enable ConfigMap revision patching
  kubeconfig: pulumi.Output<any>;        // Kubeconfig used for the SSA provider and API client
  configMapName: pulumi.Output<string>;  // Name of the ConfigMap to manage
  rolloutSpec: k8s.types.input.apiextensions.CustomResourceArgs["spec"]; // Argo Rollout spec
}
```

To enable this feature in your rollout:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as k8s from "@pulumi/kubernetes";

// Kubernetes provider for the target cluster
// (clusterKubeconfig is assumed to be defined elsewhere, e.g. exported from your cluster stack)
const cluster = new k8s.Provider("k8s-provider", {
  kubeconfig: clusterKubeconfig,
});

// Create ConfigMap
const appConfig = new k8s.core.v1.ConfigMap("app-config", {
  metadata: {
    name: "my-app-config",
    namespace: "default",
    labels: {
      app: "my-app",
      configMap: "my-app-config",
    },
  },
  data: {
    "app.properties": "key=value\nother=setting",
  },
}, { provider: cluster });

// Create rollout with ConfigMap revision management
const rollout = new ConfigMapRevisionRollout("my-app", {
  namespace: "default",
  configMapPatch: true,
  kubeconfig: clusterKubeconfig,
  configMapName: appConfig.metadata.name,
  rolloutSpec: {
    replicas: 3,
    selector: {
      matchLabels: { app: "my-app" },
    },
    template: {
      metadata: {
        labels: {
          app: "my-app",
          // Important for ReplicaSet selection: generated ReplicaSets inherit
          // this label, which getAssociatedReplicaSets uses as its label selector
          configMap: "my-app-config",
        },
      },
      spec: {
        containers: [{
          name: "app",
          image: "nginx:latest",
          volumeMounts: [{
            name: "config",
            mountPath: "/etc/config",
          }],
        }],
        volumes: [{
          name: "config",
          configMap: {
            name: appConfig.metadata.name,
          },
        }],
      },
    },
    strategy: {
      canary: {
        maxSurge: 1,
        maxUnavailable: 0,
        steps: [
          { setWeight: 20 },
          { pause: { duration: "1m" } },
          { setWeight: 50 },
          { pause: { duration: "2m" } },
        ],
      },
    },
  },
});
```

## Key dependencies

The solution relies on a few key pieces:

- `@pulumi/kubernetes`: For Kubernetes resources and ConfigMapPatch
- `@kubernetes/client-node`: For direct Kubernetes API access
- Argo Rollouts CRDs installed in your cluster

## Conclusion

This approach gives us ConfigMap revision functionality that doesn't exist natively in Kubernetes or Pulumi.
By leveraging Kubernetes' garbage collection mechanism and Pulumi's patching capabilities,
we created a robust solution for managing ConfigMap lifecycles during canary deployments.

The solution is particularly valuable when:

- Running canary deployments with Argo Rollouts
- Using AWS Spot instances that can be replaced during deployments
- Needing automatic cleanup of old ConfigMaps without manual intervention
- Wanting to maintain configuration availability for older pods during deployment transitions

This pattern can be extended to other scenarios
where you need revision control for Kubernetes resources that don't natively support it.