Commit

Merge branch 'kubernetes-sigs:master' into master
kelein authored Dec 17, 2024
2 parents 66e9f03 + 30f0a14 commit 6170851
Showing 4 changed files with 74 additions and 95 deletions.
2 changes: 1 addition & 1 deletion .gitlab-ci/packet.yml
@@ -94,7 +94,7 @@ packet_almalinux8-crio:
 packet_almalinux8-kube-ovn:
   extends: .packet_pr
 
-packet_debian11-calico:
+packet_debian11-calico-collection:
   extends: .packet_pr
 
 packet_debian11-macvlan:
File renamed without changes.
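A minimal sketch (not part of the commit) of why the "-collection" suffix in the renamed CI job matters: tests/scripts/testcases_run.sh, shown below, matches the substring "collection" in the CI job name, so packet_debian11-calico-collection now also exercises the collection build-and-install path before running the usual test cases.

    # Hypothetical stand-alone illustration of the routing check
    CI_JOB_NAME="packet_debian11-calico-collection"
    if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
        echo "collection mode: build and install the Ansible collection first"
    fi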
72 changes: 40 additions & 32 deletions tests/scripts/testcases_run.sh
@@ -29,6 +29,42 @@ export ANSIBLE_BECOME_USER=root
 export ANSIBLE_INVENTORY=${CI_PROJECT_DIR}/inventory/sample/
 
 make -C tests INVENTORY_DIR=${ANSIBLE_INVENTORY} create-${CI_PLATFORM} -s
 
+# Test collection build and install by installing our collection, emptying our
+# repository, adding cluster.yml, upgrade-cluster.yml, reset.yml, and
+# remove-node.yml files that simply point to our collection's playbooks, and
+# then running the same tests as before
+if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
+    # Build and install collection
+    ansible-galaxy collection build
+    ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
+
+    # Remove all of our files and directories except for the tests and
+    # inventory directories, to be absolutely certain that none of our
+    # playbooks or roles are interfering with our collection
+    find -mindepth 1 -maxdepth 1 ! -regex './\(tests\|inventory\)' -exec rm -rfv {} +
+
+    cat > cluster.yml <<EOF
+- name: Install Kubernetes
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
+EOF
+
+    cat > upgrade-cluster.yml <<EOF
+- name: Upgrade Kubernetes
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.upgrade-cluster
+EOF
+
+    cat > reset.yml <<EOF
+- name: Remove Kubernetes
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
+EOF
+
+    cat > remove-node.yml <<EOF
+- name: Remove node from Kubernetes
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
+EOF
+
+fi
+
 ansible-playbook tests/cloud_playbooks/wait-for-ssh.yml
 
 run_playbook () {
@@ -43,6 +79,10 @@ ansible-playbook \
     ${playbook}
 }
 
+
+## START KUBESPRAY
+
+
 # Create cluster
 run_playbook cluster.yml
 
@@ -68,38 +108,6 @@ if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
     run_playbook recover-control-plane.yml -e etcd_retries=10 --limit "etcd:kube_control_plane"
 fi
 
-# Test collection build and install by installing our collection, emptying our repository, adding
-# cluster.yml, reset.yml, and remove-node.yml files that simply point to our collection's playbooks, and then
-# running the same tests as before
-if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
-    # Build and install collection
-    ansible-galaxy collection build
-    ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
-
-    # Simply remove all of our files and directories except for our tests directory
-    # to be absolutely certain that none of our playbooks or roles
-    # are interfering with our collection
-    find -maxdepth 1 ! -name tests -exec rm -rfv {} \;
-
-    # Write cluster.yml
-    cat > cluster.yml <<EOF
-- name: Install Kubernetes
-  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
-EOF
-
-    # Write reset.yml
-    cat > reset.yml <<EOF
-- name: Remove Kubernetes
-  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
-EOF
-
-    # Write remove-node.yml
-    cat > remove-node.yml <<EOF
-- name: Remove node from Kubernetes
-  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
-EOF
-
-fi
 # Tests Cases
 ## Test Control Plane API
 run_playbook tests/testcases/010_check-apiserver.yml
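A quick way to see what the reworked find invocation does, using a throwaway scratch directory (path hypothetical): "-mindepth 1" keeps "." itself out of the match set, and the new regex spares ./inventory as well as ./tests, whereas the old "! -name tests" form would have matched "." and deleted ./inventory.

    # Sketch only: reproduce the cleanup's selection logic in isolation
    mkdir -p /tmp/kubespray-scratch/{tests,inventory,roles}
    cd /tmp/kubespray-scratch
    find -mindepth 1 -maxdepth 1 ! -regex './\(tests\|inventory\)'
    # prints only ./roles -- the one entry the cleanup would rm -rf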
95 changes: 33 additions & 62 deletions tests/testcases/030_check-network.yml
@@ -79,60 +79,47 @@
       command:
         cmd: "{{ bin_dir }}/kubectl apply -f -"
         stdin: |
-          apiVersion: v1
-          kind: Pod
+          apiVersion: apps/v1
+          kind: Deployment
           metadata:
-            name: {{ item }}
             namespace: test
+            name: agnhost
           spec:
-            containers:
-            - name: agnhost
-              image: {{ test_image_repo }}:{{ test_image_tag }}
-              command: ['/agnhost', 'netexec', '--http-port=8080']
-              securityContext:
-                allowPrivilegeEscalation: false
-                capabilities:
-                  drop: ['ALL']
-                runAsUser: 1000
-                runAsNonRoot: true
-                seccompProfile:
-                  type: RuntimeDefault
+            replicas: 2
+            selector:
+              matchLabels:
+                app: agnhost
+            template:
+              metadata:
+                labels:
+                  app: agnhost
+              spec:
+                containers:
+                - name: agnhost
+                  image: {{ test_image_repo }}:{{ test_image_tag }}
+                  command: ['/agnhost', 'netexec', '--http-port=8080']
+                  securityContext:
+                    allowPrivilegeEscalation: false
+                    capabilities:
+                      drop: ['ALL']
+                    runAsUser: 1000
+                    runAsNonRoot: true
+                    seccompProfile:
+                      type: RuntimeDefault
       changed_when: false
-      loop:
-        - agnhost1
-        - agnhost2
 
     - import_role: # noqa name[missing]
         name: cluster-dump
 
-    - name: Check that all pods are running and ready
-      command: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
-      changed_when: false
-      register: run_pods_log
-      until:
-        # Check that all pods are running
-        - '(run_pods_log.stdout | from_yaml)["items"] | map(attribute = "status.phase") | unique | list == ["Running"]'
-        # Check that all pods are ready
-        - '(run_pods_log.stdout | from_yaml)["items"] | map(attribute = "status.containerStatuses") | map("map", attribute = "ready") | map("min") | min'
-      retries: 18
-      delay: 10
-      failed_when: false
-
-    - name: Get pod names
-      command: "{{ bin_dir }}/kubectl get pods -n test -o json"
-      changed_when: false
-      register: pods
-
-    - debug: # noqa name[missing]
-        msg: "{{ pods.stdout.split('\n') }}"
-      failed_when: not run_pods_log is success
-
-    - name: Get hostnet pods
-      command: "{{ bin_dir }}/kubectl get pods -n test -o
-               jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
-      changed_when: false
-      register: hostnet_pods
-      ignore_errors: true # noqa ignore-errors
+    - block:
+        - name: Check Deployment is ready
+          command: "{{ bin_dir }}/kubectl rollout status deploy --namespace test agnhost --timeout=180s"
+          changed_when: false
+      rescue:
+        - name: Get pod names
+          command: "{{ bin_dir }}/kubectl get pods -n test -o json"
+          changed_when: false
+          register: pods
 
     - name: Get running pods
       command: "{{ bin_dir }}/kubectl get pods -n test -o
@@ -153,9 +140,6 @@
         kube_pods_subnet: 10.233.64.0/18
         pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}"
         pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute='status.podIP') | list }}"
-        pods_hostnet: |
-          {% set list = hostnet_pods.stdout.split(" ") %}
-          {{ list }}
         pods_running: |
           {% set list = running_pods.stdout.split(" ") %}
           {{ list }}
@@ -164,24 +148,11 @@
       assert:
         that: item | ansible.utils.ipaddr(kube_pods_subnet)
       when:
-        - not item in pods_hostnet
        - item in pods_running
       with_items: "{{ pod_ips }}"
 
     - name: Curl between pods is working
       command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- curl {{ item[1] }}:8080"
-      when:
-        - not item[0] in pods_hostnet
-        - not item[1] in pods_hostnet
       with_nested:
         - "{{ pod_names }}"
         - "{{ pod_ips }}"
-
-    - name: Curl between hostnet pods is working
-      command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- curl {{ item[1] }}:8080"
-      when:
-        - item[0] in pods_hostnet
-        - item[1] in pods_hostnet
-      with_nested:
-        - "{{ pod_names }}"
-        - "{{ pod_ips }}"

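For reference, a hand-run equivalent of the new readiness gate (assuming kubectl access to the test cluster): "rollout status" blocks until the Deployment's replicas are ready or the timeout expires, replacing the old retry loop (18 retries x 10s delay) over pod phases, and its non-zero exit on timeout is what lands the play in the rescue: section.

    # Sketch only: what the "Check Deployment is ready" task runs
    kubectl rollout status deploy --namespace test agnhost --timeout=180s
    echo "exit code: $?"   # non-zero on timeout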