
Commit 6acb114

camrynl and Copilot authored
chore: updating cilium nightly charts (#3676)
* chore: update nightly manifests from v1.14 > match v1.17 (latest available)
* fix: update clusterrole with cilium-config
* chore: update hubble dir path
* log file directory
* Update test/integration/manifests/cilium/cilium-nightly-config.yaml

Co-authored-by: Copilot <[email protected]>
Signed-off-by: Camryn Lee <[email protected]>

* fix: cluster naming in log collection

---------

Signed-off-by: Camryn Lee <[email protected]>
Co-authored-by: Copilot <[email protected]>
1 parent 196fce9 commit 6acb114

File tree

11 files changed: +208 / -52 lines changed


.pipelines/cni/cilium/nightly-release-test.yml

Lines changed: 2 additions & 2 deletions
@@ -159,7 +159,7 @@ stages:
           scriptType: "bash"
           addSpnToEnvironment: true
           inlineScript: |
-            make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=$(clusterName)-$(commitID)
+            make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=ciliumnightly-$(commitID)

             set -e
             echo "Run Cilium Connectivity Tests"
@@ -171,7 +171,7 @@ stages:

       - template: ../../templates/log-check-template.yaml # Operator Check
         parameters:
-          clusterName: $(clusterName)-$(commitID)
+          clusterName: ciliumnightly-$(commitID)
           podLabel: "name=cilium-operator"
           logGrep: "level=error"
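Note: to reproduce the cluster-name change locally, the same make target shown in this diff can be run with a concrete commit ID in place of the $(commitID) pipeline variable; a minimal sketch (the commit ID value below is only an example, the pipeline injects its own):

    # Hypothetical short commit ID; the pipeline substitutes $(commitID) at runtime.
    COMMIT_ID=6acb114
    make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=ciliumnightly-${COMMIT_ID}
    kubectl config current-context   # should now point at the ciliumnightly cluster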

.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml

Lines changed: 3 additions & 2 deletions
@@ -124,9 +124,10 @@ steps:

   - ${{ if eq( parameters['testHubble'], true) }}:
     - script: |
-        echo "enable Hubble metrics server"
+        export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
+        export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
         kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
-        kubectl apply -f test/integration/manifests/cilium/v1.14.4/cilium-config/cilium-config-hubble.yaml
+        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
         kubectl rollout restart ds cilium -n kube-system
         echo "wait <3 minutes for pods to be ready after restart"
         kubectl rollout status ds cilium -n kube-system --timeout=3m
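Note: the two new export lines derive the manifest directory from the Hubble image tag by stripping the leading "v" and keeping only major.minor, so the pipeline no longer hard-codes v1.14.4; a quick shell check of that substitution (the tag value here is only an example):

    # Example tag; in the pipeline CILIUM_VERSION_TAG is taken from CILIUM_HUBBLE_VERSION_TAG.
    export CILIUM_VERSION_TAG=v1.17.0
    export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
    echo "${DIR}"   # prints 1.17
    echo "test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml"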

.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml

Lines changed: 3 additions & 1 deletion
@@ -123,8 +123,10 @@ steps:
   - ${{ if eq( parameters['testHubble'], true) }}:
     - script: |
         echo "enable Hubble metrics server"
+        export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
+        export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
         kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
-        kubectl apply -f test/integration/manifests/cilium/v1.14.4/cilium-config/cilium-config-hubble.yaml
+        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
         kubectl rollout restart ds cilium -n kube-system
         echo "wait <3 minutes for pods to be ready after restart"
         kubectl rollout status ds cilium -n kube-system --timeout=3m

.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml

Lines changed: 4 additions & 1 deletion
@@ -158,8 +158,11 @@ steps:
   - ${{ if eq( parameters['testHubble'], true) }}:
     - script: |
         echo "enable Hubble metrics server"
+        export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
+        export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
+        echo "installing files from ${DIR}"
         kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
-        kubectl apply -f test/integration/manifests/cilium/cilium-config-hubble.yaml
+        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
         kubectl rollout restart ds cilium -n kube-system
         echo "wait <3 minutes for pods to be ready after restart"
         kubectl rollout status ds cilium -n kube-system --timeout=3m

.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml

Lines changed: 3 additions & 1 deletion
@@ -156,8 +156,10 @@ steps:
   - ${{ if eq( parameters['testHubble'], true) }}:
     - script: |
         echo "enable Hubble metrics server"
+        export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
+        export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
         kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
-        kubectl apply -f test/integration/manifests/cilium/cilium-config-hubble.yaml
+        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
         kubectl rollout restart ds cilium -n kube-system
         echo "wait <3 minutes for pods to be ready after restart"
         kubectl rollout status ds cilium -n kube-system --timeout=3m

test/integration/manifests/cilium/cilium-nightly-agent/clusterrole.yaml

Lines changed: 20 additions & 3 deletions
@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cilium
+  labels:
+    app.kubernetes.io/part-of: cilium
 rules:
 - apiGroups:
   - networking.k8s.io
@@ -45,8 +47,6 @@ rules:
 - apiGroups:
   - cilium.io
   resources:
-  #Naming changed from ciliumbgploadbalancerippools
-  - ciliumloadbalancerippools
   - ciliumbgppeeringpolicies
   - ciliumclusterwideenvoyconfigs
   - ciliumclusterwidenetworkpolicies
@@ -59,8 +59,13 @@ rules:
   - ciliumnetworkpolicies
   - ciliumnodes
   - ciliumnodeconfigs
-  #Added in 1.14.0 snapshot 2
+  - ciliumloadbalancerippools
   - ciliumcidrgroups
+  - ciliuml2announcementpolicies
+  - ciliumpodippools
+  - ciliumbgpnodeconfigs
+  - ciliumbgpadvertisements
+  - ciliumbgppeerconfigs
   verbs:
   - list
   - watch
@@ -74,6 +79,7 @@ rules:
   - create
 - apiGroups:
   - cilium.io
+  # To synchronize garbage collection of such resources
   resources:
   - ciliumidentities
   verbs:
@@ -100,5 +106,16 @@ rules:
   - ciliumclusterwidenetworkpolicies/status
   - ciliumendpoints/status
   - ciliumendpoints
+  - ciliuml2announcementpolicies/status
+  - ciliumbgpnodeconfigs/status
   verbs:
   - patch
+- apiGroups:
+  - ""
+  resourceNames:
+  - cilium-config
+  resources:
+  - configmaps
+  verbs:
+  - list
+  - watch
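Note: one way to sanity-check the new configmap rule is kubectl auth can-i while impersonating the agent's service account (the "cilium" service account name in kube-system is assumed here; it is not part of this diff):

    # Expect "yes" once the updated ClusterRole is applied.
    kubectl auth can-i list configmaps/cilium-config \
      --as=system:serviceaccount:kube-system:cilium -n kube-system
    kubectl auth can-i watch configmaps/cilium-config \
      --as=system:serviceaccount:kube-system:cilium -n kube-system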

test/integration/manifests/cilium/cilium-nightly-agent/clusterrolebinding.yaml

Lines changed: 2 additions & 0 deletions
@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: cilium
+  labels:
+    app.kubernetes.io/part-of: cilium
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole

test/integration/manifests/cilium/cilium-nightly-config.yaml

Lines changed: 58 additions & 9 deletions
@@ -1,4 +1,4 @@
-apiVersion: v1
+apiVersion: v1 #Not verified, placeholder
 data:
   agent-not-ready-taint-key: node.cilium.io/agent-not-ready
   arping-refresh-period: 30s
@@ -9,7 +9,6 @@ data:
   bpf-map-dynamic-size-ratio: "0.0025"
   bpf-policy-map-max: "16384"
   bpf-root: /sys/fs/bpf
-  ces-slice-mode: fcfs
   cgroup-root: /run/cilium/cgroupv2
   cilium-endpoint-gc-interval: 5m0s
   cluster-id: "0"
@@ -20,7 +19,6 @@ data:
   enable-auto-protect-node-port-range: "true"
   enable-bgp-control-plane: "false"
   enable-bpf-clock-probe: "true"
-  enable-cilium-endpoint-slice: "true"
   enable-endpoint-health-checking: "false"
   enable-endpoint-routes: "true"
   enable-health-check-nodeport: "true"
@@ -35,7 +33,7 @@ data:
   enable-l2-neigh-discovery: "true"
   enable-l7-proxy: "false"
   enable-local-node-route: "false"
-  enable-local-redirect-policy: "true"
+  enable-local-redirect-policy: "true" # set to true for lrp test
   enable-metrics: "true"
   enable-policy: default
   enable-session-affinity: "true"
@@ -48,7 +46,7 @@ data:
   install-no-conntrack-iptables-rules: "false"
   ipam: delegated-plugin
   kube-proxy-replacement: "true"
-  kube-proxy-replacement-healthz-bind-address: ""
+  kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256"
   local-router-ipv4: 169.254.23.0
   metrics: +cilium_bpf_map_pressure
   monitor-aggregation: medium
@@ -63,21 +61,72 @@ data:
   prometheus-serve-addr: :9962
   remove-cilium-node-taints: "true"
   set-cilium-is-up-condition: "true"
+  sidecar-istio-proxy-image: cilium/istio_proxy
   synchronize-k8s-nodes: "true"
   tofqdns-dns-reject-response-code: refused
   tofqdns-enable-dns-compression: "true"
-  tofqdns-endpoint-max-ip-per-hostname: "50"
+  tofqdns-endpoint-max-ip-per-hostname: "1000"
   tofqdns-idle-connection-grace-period: 0s
   tofqdns-max-deferred-connection-deletes: "10000"
-  tofqdns-min-ttl: "3600"
+  tofqdns-min-ttl: "0"
   tofqdns-proxy-response-max-delay: 100ms
-  #Replaces tunnel: disabled in v1.15
-  routing-mode: "native"
+  routing-mode: native
   unmanaged-pod-watcher-interval: "15"
   vtep-cidr: ""
   vtep-endpoint: ""
   vtep-mac: ""
   vtep-mask: ""
+  enable-sctp: "false"
+  external-envoy-proxy: "false"
+  k8s-client-qps: "10"
+  k8s-client-burst: "20"
+  mesh-auth-enabled: "true"
+  mesh-auth-queue-size: "1024"
+  mesh-auth-rotated-identities-queue-size: "1024"
+  mesh-auth-gc-interval: "5m0s"
+  proxy-connect-timeout: "2"
+  proxy-max-requests-per-connection: "0"
+  proxy-max-connection-duration-seconds: "0"
+  set-cilium-node-taints: "true"
+  ## new values added for 1.16 below
+  enable-ipv4-big-tcp: "false"
+  enable-ipv6-big-tcp: "false"
+  enable-masquerade-to-route-source: "false"
+  enable-health-check-loadbalancer-ip: "false"
+  bpf-lb-acceleration: "disabled"
+  enable-k8s-networkpolicy: "true"
+  cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down
+  cni-log-file: "/var/run/cilium/cilium-cni.log"
+  ipam-cilium-node-update-rate: "15s"
+  egress-gateway-reconciliation-trigger-interval: "1s"
+  nat-map-stats-entries: "32"
+  nat-map-stats-interval: "30s"
+  bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble
+  bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble
+  bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble
+  enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel
+  datapath-mode: "veth"
+  direct-routing-skip-unreachable: "false"
+  enable-runtime-device-detection: "false"
+  bpf-lb-sock: "false"
+  bpf-lb-sock-terminate-pod-connections: "false"
+  nodeport-addresses: ""
+  k8s-require-ipv4-pod-cidr: "false"
+  k8s-require-ipv6-pod-cidr: "false"
+  enable-node-selector-labels: "false"
+  ## new values for 1.17
+  ces-slice-mode: "fcfs"
+  enable-cilium-endpoint-slice: "true"
+  bpf-lb-source-range-all-types: "false"
+  bpf-algorithm-annotation: "false"
+  bpf-lb-mode-annotation: "false"
+  enable-experimental-lb: "false"
+  enable-endpoint-lockdown-on-policy-overflow: "false"
+  health-check-icmp-failure-threshold: "3"
+  enable-internal-traffic-policy: "true"
+  enable-lb-ipam: "true"
+  enable-non-default-deny-policies: "true"
+  enable-source-ip-verification: "true"
 kind: ConfigMap
 metadata:
   annotations:
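Note: to exercise the regenerated nightly config by hand, it can be applied and the agent restarted the same way the pipeline steps above do; a sketch, assuming the manifest produces the usual cilium-config ConfigMap in kube-system:

    kubectl apply -f test/integration/manifests/cilium/cilium-nightly-config.yaml
    # Spot-check one of the values reintroduced for 1.17 (ConfigMap name assumed to be cilium-config).
    kubectl get configmap cilium-config -n kube-system \
      -o jsonpath='{.data.enable-cilium-endpoint-slice}{"\n"}'
    kubectl rollout restart ds cilium -n kube-system
    kubectl rollout status ds cilium -n kube-system --timeout=3m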

test/integration/manifests/cilium/cilium-nightly-operator/clusterrole.yaml

Lines changed: 57 additions & 3 deletions
@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cilium-operator
+  labels:
+    app.kubernetes.io/part-of: cilium
 rules:
 - apiGroups:
   - ""
@@ -14,6 +16,15 @@ rules:
   # to automatically delete [core|kube]dns pods so that are starting to being
   # managed by Cilium
   - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  resourceNames:
+  - cilium-config
+  verbs:
+  # allow patching of the configmap to set annotations
+  - patch
 - apiGroups:
   - ""
   resources:
@@ -51,6 +62,7 @@ rules:
   resources:
   # to check apiserver connectivity
   - namespaces
+  - secrets
   verbs:
   - get
   - list
@@ -87,6 +99,7 @@ rules:
   - ciliumclusterwidenetworkpolicies/status
   verbs:
   # Update the auto-generated CNPs and CCNPs status.
+  - patch
   - update
 - apiGroups:
   - cilium.io
@@ -103,6 +116,7 @@ rules:
   resources:
   - ciliumidentities
   verbs:
+  # To synchronize garbage collection of such resources
   - update
 - apiGroups:
   - cilium.io
@@ -127,6 +141,9 @@ rules:
   resources:
   - ciliumendpointslices
   - ciliumenvoyconfigs
+  - ciliumbgppeerconfigs
+  - ciliumbgpadvertisements
+  - ciliumbgpnodeconfigs
   verbs:
   - create
   - update
@@ -135,6 +152,13 @@ rules:
   - watch
   - delete
   - patch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumbgpclusterconfigs/status
+  - ciliumbgppeerconfigs/status
+  verbs:
+  - update
 - apiGroups:
   - apiextensions.k8s.io
   resources:
@@ -153,10 +177,14 @@ rules:
   resourceNames:
   - ciliumloadbalancerippools.cilium.io
   - ciliumbgppeeringpolicies.cilium.io
+  - ciliumbgpclusterconfigs.cilium.io
+  - ciliumbgppeerconfigs.cilium.io
+  - ciliumbgpadvertisements.cilium.io
+  - ciliumbgpnodeconfigs.cilium.io
+  - ciliumbgpnodeconfigoverrides.cilium.io
   - ciliumclusterwideenvoyconfigs.cilium.io
   - ciliumclusterwidenetworkpolicies.cilium.io
   - ciliumegressgatewaypolicies.cilium.io
-  - ciliumegressnatpolicies.cilium.io
   - ciliumendpoints.cilium.io
   - ciliumendpointslices.cilium.io
   - ciliumenvoyconfigs.cilium.io
@@ -166,8 +194,34 @@ rules:
   - ciliumnetworkpolicies.cilium.io
   - ciliumnodes.cilium.io
   - ciliumnodeconfigs.cilium.io
-  #Added in 1.14.0 snapshot 2
   - ciliumcidrgroups.cilium.io
+  - ciliuml2announcementpolicies.cilium.io
+  - ciliumpodippools.cilium.io
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumloadbalancerippools
+  - ciliumpodippools
+  - ciliumbgppeeringpolicies
+  - ciliumbgpclusterconfigs
+  - ciliumbgpnodeconfigoverrides
+  - ciliumbgppeerconfigs
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumpodippools
+  verbs:
+  - create
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumloadbalancerippools/status
+  verbs:
+  - patch
 # For cilium-operator running in HA mode.
 #
 # Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
@@ -181,4 +235,4 @@ rules:
   verbs:
   - create
   - get
-  - update
+  - update
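Note: as with the agent role, the operator's new configmap patch permission can be checked with kubectl auth can-i (the "cilium-operator" service account name is assumed here; it is not part of this diff):

    # Expect "yes" for the cilium-config patch rule added above.
    kubectl auth can-i patch configmaps/cilium-config \
      --as=system:serviceaccount:kube-system:cilium-operator -n kube-system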
