Skip to content

Commit

Permalink
Merge pull request #6857 from vpatelsj/release-1.30
Browse files Browse the repository at this point in the history
Fix: Failure to Enforce clustercidr on nodes with existing cidr should not be treated as a fatal error
  • Loading branch information
k8s-ci-robot authored Aug 26, 2024
2 parents 2d252cd + 2452996 commit 83dd255
Show file tree
Hide file tree
Showing 3 changed files with 48 additions and 17 deletions.
26 changes: 26 additions & 0 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Starter pipeline
# Start with a minimal pipeline that you can customize to build and deploy your code.
# Add steps that build, run tests, deploy, and more:
# https://aka.ms/yaml

trigger:
- master

pool:
  vmImage: ubuntu-latest

steps:
- task: AzureCLI@2
  displayName: Build component binaries, containers and packages
  inputs:
    # Service connection name contains spaces, so quote it explicitly.
    azureSubscription: 'aks deploy msi - dev'
    scriptType: bash
    scriptLocation: inlineScript
    inlineScript: |
      set -euo pipefail
      export DOCKER_BUILDKIT=1
      az acr login --name acsdevdeployment
      IMAGE_REGISTRY=acsdevdeployment.azurecr.io ARCH=amd64 OUTPUT_TYPE=docker make build-ccm-image
      # Tag the freshly built image with a release-specific tag derived from the short commit SHA.
      IMAGE_TAG=$(git rev-parse --short=7 HEAD)
      docker tag acsdevdeployment.azurecr.io/azure-cloud-controller-manager:$IMAGE_TAG acsdevdeployment.azurecr.io/azure-cloud-controller-manager:v1.30.vapa1.$IMAGE_TAG
      docker push acsdevdeployment.azurecr.io/azure-cloud-controller-manager:v1.30.vapa1.$IMAGE_TAG
7 changes: 6 additions & 1 deletion pkg/nodeipam/ipam/range_allocator.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package ipam
import (
"fmt"
"net"
"strings"
"sync"

v1 "k8s.io/api/core/v1"
Expand Down Expand Up @@ -127,7 +128,11 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
// This will happen if:
// 1. We find garbage in the podCIDRs field. Retrying is useless.
// 2. CIDR out of range: This means a node CIDR has changed.
// This error will keep crashing controller-manager.
// This error should not keep crashing controller-manager as there may be a podcidr migration happening.
if strings.Contains(err.Error(), "is out the range of cluster cidr") {
klog.Warningf("Error occupying podCIDR %s on node %s: %v", node.Spec.PodCIDR, node.Name, err)
continue
}
return nil, err
}
}
Expand Down
32 changes: 16 additions & 16 deletions pkg/nodeipam/ipam/range_allocator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -182,9 +182,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
expectedAllocatedCIDR: nil,
ctrlCreateFail: false,
},
// failure cases
{
description: "fail, single stack incorrect node allocation",
description: "success, single stack incorrect node allocation",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
Expand All @@ -209,10 +208,10 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatedCIDRs: nil,
expectedAllocatedCIDR: nil,
ctrlCreateFail: true,
ctrlCreateFail: false,
},
{
description: "fail, dualstack node allocating from non existing cidr",
description: "success, dualstack node allocating bad v4",

fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
Expand All @@ -221,7 +220,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
Name: "node0",
},
Spec: v1.NodeSpec{
PodCIDRs: []string{"10.10.0.1/24", "a00::/86"},
PodCIDRs: []string{"172.10.0.1/24", "a00::/86"},
},
},
},
Expand All @@ -230,18 +229,19 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4}
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(),
ServiceCIDR: nil,
SecondaryServiceCIDR: nil,
NodeCIDRMaskSizes: []int{24},
NodeCIDRMaskSizes: []int{24, 24},
},
allocatedCIDRs: nil,
expectedAllocatedCIDR: nil,
ctrlCreateFail: true,
ctrlCreateFail: false,
},
{
description: "fail, dualstack node allocating bad v4",
description: "success, dualstack node allocating bad v6",

fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
Expand All @@ -250,7 +250,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
Name: "node0",
},
Spec: v1.NodeSpec{
PodCIDRs: []string{"172.10.0.1/24", "a00::/86"},
PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"},
},
},
},
Expand All @@ -268,10 +268,11 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatedCIDRs: nil,
expectedAllocatedCIDR: nil,
ctrlCreateFail: true,
ctrlCreateFail: false,
},
// failure cases
{
description: "fail, dualstack node allocating bad v6",
description: "fail, dualstack node allocating from non existing cidr",

fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
Expand All @@ -280,7 +281,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
Name: "node0",
},
Spec: v1.NodeSpec{
PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"},
PodCIDRs: []string{"10.10.0.1/24", "a00::/86"},
},
},
},
Expand All @@ -289,12 +290,11 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
return []*net.IPNet{clusterCIDRv4}
}(),
ServiceCIDR: nil,
SecondaryServiceCIDR: nil,
NodeCIDRMaskSizes: []int{24, 24},
NodeCIDRMaskSizes: []int{24},
},
allocatedCIDRs: nil,
expectedAllocatedCIDR: nil,
Expand Down

0 comments on commit 83dd255

Please sign in to comment.