From 074f0a6555411afa67316715af1076af14c5f0fc Mon Sep 17 00:00:00 2001 From: abikouo <79859644+abikouo@users.noreply.github.com> Date: Wed, 23 Mar 2022 10:26:06 +0100 Subject: [PATCH] fix issue when using k8s_drain with disable_eviction set to yes (#418) fix issue when using k8s_drain with disable_eviction set to yes SUMMARY fixes #416 ISSUE TYPE Bugfix Pull Request COMPONENT NAME k8s_drain ADDITIONAL INFORMATION Reviewed-by: Abhijeet Kasurde --- .../417-fix-k8s-drain-delete-options.yaml | 3 + plugins/modules/k8s_drain.py | 24 +++---- tests/integration/targets/k8s_drain/aliases | 2 +- .../targets/k8s_drain/tasks/main.yml | 66 +++++++++++++++++++ 4 files changed, 78 insertions(+), 17 deletions(-) create mode 100644 changelogs/fragments/417-fix-k8s-drain-delete-options.yaml diff --git a/changelogs/fragments/417-fix-k8s-drain-delete-options.yaml b/changelogs/fragments/417-fix-k8s-drain-delete-options.yaml new file mode 100644 index 0000000000..c897c7e09d --- /dev/null +++ b/changelogs/fragments/417-fix-k8s-drain-delete-options.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_drain - fix error occurring when trying to drain node with disable_eviction set to yes (https://github.com/ansible-collections/kubernetes.core/issues/416). diff --git a/plugins/modules/k8s_drain.py b/plugins/modules/k8s_drain.py index 11b7dbcd34..af3c69077f 100644 --- a/plugins/modules/k8s_drain.py +++ b/plugins/modules/k8s_drain.py @@ -140,7 +140,7 @@ try: from kubernetes.client.api import core_v1_api - from kubernetes.client.models import V1DeleteOptions + from kubernetes.client.models import V1DeleteOptions, V1ObjectMeta from kubernetes.client.exceptions import ApiException except ImportError: # ImportError are managed by the common module already. 
@@ -273,15 +273,8 @@ def __init__(self, module): self._drain_options = module.params.get("delete_options", {}) self._delete_options = None if self._drain_options.get("terminate_grace_period"): - self._delete_options = {} - self._delete_options.update({"apiVersion": "v1"}) - self._delete_options.update({"kind": "DeleteOptions"}) - self._delete_options.update( - { - "gracePeriodSeconds": self._drain_options.get( - "terminate_grace_period" - ) - } + self._delete_options = V1DeleteOptions( + grace_period_seconds=self._drain_options.get("terminate_grace_period") ) self._changed = False @@ -318,17 +311,16 @@ def _elapsed_time(): def evict_pods(self, pods): for namespace, name in pods: - definition = {"metadata": {"name": name, "namespace": namespace}} - if self._delete_options: - definition.update({"delete_options": self._delete_options}) try: if self._drain_options.get("disable_eviction"): - body = V1DeleteOptions(**definition) self._api_instance.delete_namespaced_pod( - name=name, namespace=namespace, body=body + name=name, namespace=namespace, body=self._delete_options ) else: - body = v1_eviction(**definition) + body = v1_eviction( + delete_options=self._delete_options, + metadata=V1ObjectMeta(name=name, namespace=namespace), + ) self._api_instance.create_namespaced_pod_eviction( name=name, namespace=namespace, body=body ) diff --git a/tests/integration/targets/k8s_drain/aliases b/tests/integration/targets/k8s_drain/aliases index 476fde2bdb..4e6d338db8 100644 --- a/tests/integration/targets/k8s_drain/aliases +++ b/tests/integration/targets/k8s_drain/aliases @@ -1,4 +1,4 @@ k8s_drain k8s k8s_info -time=78 +time=121 diff --git a/tests/integration/targets/k8s_drain/tasks/main.yml b/tests/integration/targets/k8s_drain/tasks/main.yml index 0eae3c7276..f16f8affc4 100644 --- a/tests/integration/targets/k8s_drain/tasks/main.yml +++ b/tests/integration/targets/k8s_drain/tasks/main.yml @@ -286,6 +286,72 @@ state: uncordon name: '{{ node_to_drain }}' + - name: Create another 
Pod + k8s: + namespace: '{{ test_namespace }}' + wait: yes + wait_timeout: "{{ k8s_wait_timeout | default(omit) }}" + definition: + apiVersion: v1 + kind: Pod + metadata: + name: '{{ drain_pod_name }}-01' + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchFields: + - key: metadata.name + operator: In + values: + - '{{ node_to_drain }}' + containers: + - name: c0 + image: busybox + command: + - /bin/sh + - -c + - while true;do date;sleep 5; done + volumeMounts: + - mountPath: /emptydir + name: emptydir + volumes: + - name: emptydir + emptyDir: {} + + - name: Drain node using disable_eviction set to yes + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + delete_options: + force: true + disable_eviction: yes + terminate_grace_period: 0 + ignore_daemonsets: yes + wait_timeout: 0 + delete_emptydir_data: true + register: disable_evict + + - name: assert that node has been drained + assert: + that: + - disable_evict is changed + - '"node {{ node_to_drain }} marked unschedulable." in disable_evict.result' + + - name: assert that unmanaged pods were deleted + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + name: '{{ drain_pod_name }}-01' + register: _result + failed_when: _result.resources + + - name: Uncordon node + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + always: - name: Uncordon node k8s_drain: