diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e3fe7c20..9c47af11 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -76,7 +76,7 @@ jobs:
           path: ansible_collections/community/kubernetes

       - name: Set up KinD cluster
-        uses: engineerd/setup-kind@v0.3.0
+        uses: engineerd/setup-kind@v0.4.0

       - name: Set up Python ${{ matrix.python_version }}
         uses: actions/setup-python@v1
diff --git a/molecule/default/converge.yml b/molecule/default/converge.yml
index ebac0811..68dd56b3 100644
--- a/molecule/default/converge.yml
+++ b/molecule/default/converge.yml
@@ -21,6 +21,7 @@
         that: (pod_list.resources | count) > 5

     - include_tasks: tasks/delete.yml
+    - include_tasks: tasks/scale.yml
     - include_tasks: tasks/apply.yml
     - include_tasks: tasks/waiter.yml
     - include_tasks: tasks/full.yml
diff --git a/molecule/default/tasks/apply.yml b/molecule/default/tasks/apply.yml
index 1dd49e17..ca4fa284 100644
--- a/molecule/default/tasks/apply.yml
+++ b/molecule/default/tasks/apply.yml
@@ -403,6 +403,318 @@
         that:
           - deploy_after_serviceaccount_removal is failed

+    - name: Insert new service port
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: Service
+          metadata:
+            name: apply-svc
+            namespace: "{{ apply_namespace }}"
+          spec:
+            selector:
+              app: whatever
+            ports:
+              - name: mesh
+                port: 8080
+                targetPort: 8080
+              - name: http
+                port: 8081
+                targetPort: 8081
+        apply: yes
+      register: k8s_service_4
+
+    - name: Check ports are correct
+      assert:
+        that:
+          - k8s_service_4 is changed
+          - k8s_service_4.result.spec.ports | length == 2
+          - k8s_service_4.result.spec.ports[0].port == 8080
+          - k8s_service_4.result.spec.ports[1].port == 8081
+
+    - name: Remove new service port (check mode)
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: Service
+          metadata:
+            name: apply-svc
+            namespace: "{{ apply_namespace }}"
+          spec:
+            selector:
+              app: whatever
+            ports:
+              - name: http
+                port: 8081
+                targetPort: 8081
+        apply: yes
+      check_mode: yes
+      register: k8s_service_check
+
+    - name: Check ports are correct
+      assert:
+        that:
+          - k8s_service_check is changed
+          - k8s_service_check.result.spec.ports | length == 1
+          - k8s_service_check.result.spec.ports[0].port == 8081
+
+    - name: Remove new service port
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: Service
+          metadata:
+            name: apply-svc
+            namespace: "{{ apply_namespace }}"
+          spec:
+            selector:
+              app: whatever
+            ports:
+              - name: http
+                port: 8081
+                targetPort: 8081
+        apply: yes
+      register: k8s_service_5
+
+    - name: Check ports are correct
+      assert:
+        that:
+          - k8s_service_5 is changed
+          - k8s_service_5.result.spec.ports | length == 1
+          - k8s_service_5.result.spec.ports[0].port == 8081
+
+    - name: Add a serviceaccount
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: ServiceAccount
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+
+    - name: Add a deployment
+      k8s:
+        definition:
+          apiVersion: apps/v1
+          kind: Deployment
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+          spec:
+            replicas: 1
+            selector:
+              matchLabels:
+                app: "{{ k8s_pod_name }}"
+            template: "{{ k8s_pod_template }}"
+        wait: yes
+        apply: yes
+      vars:
+        k8s_pod_name: apply-deploy
+        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+        k8s_pod_service_account: apply-deploy
+        k8s_pod_ports:
+          - containerPort: 8080
+            name: http
+            protocol: TCP
+
+    - name: Remove the serviceaccount
+      k8s:
+        state: absent
+        definition:
+          apiVersion: v1
+          kind: ServiceAccount
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+
+    - name: Update the earlier deployment
+      k8s:
+        definition:
+          apiVersion: apps/v1
+          kind: Deployment
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+          spec:
+            replicas: 2
+            selector:
+              matchLabels:
+                app: "{{ k8s_pod_name }}"
+            template: "{{ k8s_pod_template }}"
+        wait: yes
+        apply: yes
+      vars:
+        k8s_pod_name: apply-deploy
+        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
+        k8s_pod_service_account: apply-deploy
+        k8s_pod_ports:
+          - containerPort: 8080
+            name: http
+            protocol: TCP
+      register: deploy_after_serviceaccount_removal
+      ignore_errors: yes
+
+    - name: Ensure that updating deployment after service account removal failed
+      assert:
+        that:
+          - deploy_after_serviceaccount_removal is failed
+
+    - name: Insert new service port
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: Service
+          metadata:
+            name: apply-svc
+            namespace: "{{ apply_namespace }}"
+          spec:
+            selector:
+              app: whatever
+            ports:
+              - name: mesh
+                port: 8080
+                targetPort: 8080
+              - name: http
+                port: 8081
+                targetPort: 8081
+        apply: yes
+      register: k8s_service_4
+
+    - name: Check ports are correct
+      assert:
+        that:
+          - k8s_service_4 is changed
+          - k8s_service_4.result.spec.ports | length == 2
+          - k8s_service_4.result.spec.ports[0].port == 8080
+          - k8s_service_4.result.spec.ports[1].port == 8081
+
+    - name: Remove new service port (check mode)
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: Service
+          metadata:
+            name: apply-svc
+            namespace: "{{ apply_namespace }}"
+          spec:
+            selector:
+              app: whatever
+            ports:
+              - name: http
+                port: 8081
+                targetPort: 8081
+        apply: yes
+      check_mode: yes
+      register: k8s_service_check
+
+    - name: Check ports are correct
+      assert:
+        that:
+          - k8s_service_check is changed
+          - k8s_service_check.result.spec.ports | length == 1
+          - k8s_service_check.result.spec.ports[0].port == 8081
+
+    - name: Remove new service port
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: Service
+          metadata:
+            name: apply-svc
+            namespace: "{{ apply_namespace }}"
+          spec:
+            selector:
+              app: whatever
+            ports:
+              - name: http
+                port: 8081
+                targetPort: 8081
+        apply: yes
+      register: k8s_service_5
+
+    - name: Check ports are correct
+      assert:
+        that:
+          - k8s_service_5 is changed
+          - k8s_service_5.result.spec.ports | length == 1
+          - k8s_service_5.result.spec.ports[0].port == 8081
+
+    - name: Add a serviceaccount
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: ServiceAccount
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+
+    - name: Add a deployment
+      k8s:
+        definition:
+          apiVersion: apps/v1
+          kind: Deployment
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+          spec:
+            replicas: 1
+            selector:
+              matchLabels:
+                app: "{{ k8s_pod_name }}"
+            template: "{{ k8s_pod_template }}"
+        wait: yes
+        apply: yes
+      vars:
+        k8s_pod_name: apply-deploy
+        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+        k8s_pod_service_account: apply-deploy
+        k8s_pod_ports:
+          - containerPort: 8080
+            name: http
+            protocol: TCP
+
+    - name: Remove the serviceaccount
+      k8s:
+        state: absent
+        definition:
+          apiVersion: v1
+          kind: ServiceAccount
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+
+    - name: Update the earlier deployment
+      k8s:
+        definition:
+          apiVersion: apps/v1
+          kind: Deployment
+          metadata:
+            name: apply-deploy
+            namespace: "{{ apply_namespace }}"
+          spec:
+            replicas: 2
+            selector:
+              matchLabels:
+                app: "{{ k8s_pod_name }}"
+            template: "{{ k8s_pod_template }}"
+        wait: yes
+        apply: yes
+      vars:
+        k8s_pod_name: apply-deploy
+        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
+        k8s_pod_service_account: apply-deploy
+        k8s_pod_ports:
+          - containerPort: 8080
+            name: http
+            protocol: TCP
+      register: deploy_after_serviceaccount_removal
+      ignore_errors: yes
+
+    - name: Ensure that updating deployment after service account removal failed
+      assert:
+        that:
+          - deploy_after_serviceaccount_removal is failed
+
   always:
     - name: Remove namespace
       k8s:
diff --git a/molecule/default/tasks/scale.yml b/molecule/default/tasks/scale.yml
new file mode 100644
index 00000000..32b718df
--- /dev/null
+++ b/molecule/default/tasks/scale.yml
@@ -0,0 +1,210 @@
+---
+- block:
+    - set_fact:
+        scale_namespace: scale
+
+    - name: Ensure namespace exists
+      k8s:
+        definition:
+          apiVersion: v1
+          kind: Namespace
+          metadata:
+            name: "{{ scale_namespace }}"
+
+    - name: Add a deployment
+      k8s:
+        definition:
+          apiVersion: apps/v1
+          kind: Deployment
+          metadata:
+            name: scale-deploy
+            namespace: "{{ scale_namespace }}"
+          spec:
+            replicas: 1
+            selector:
+              matchLabels:
+                app: "{{ k8s_pod_name }}"
+            template: "{{ k8s_pod_template }}"
+        wait: yes
+        wait_timeout: 60
+        apply: yes
+      vars:
+        k8s_pod_name: scale-deploy
+        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+        k8s_pod_ports:
+          - containerPort: 8080
+            name: http
+            protocol: TCP
+
+    - name: Get pods in scale-deploy
+      k8s_info:
+        kind: Pod
+        label_selectors:
+          - app=scale-deploy
+        namespace: "{{ scale_namespace }}"
+        field_selectors:
+          - status.phase=Running
+
+    - name: Scale the deployment
+      k8s_scale:
+        api_version: apps/v1
+        kind: Deployment
+        name: scale-deploy
+        namespace: "{{ scale_namespace }}"
+        replicas: 0
+        wait: yes
+      register: scale_down
+
+    - name: Get pods in scale-deploy
+      k8s_info:
+        kind: Pod
+        label_selectors:
+          - app=scale-deploy
+        namespace: "{{ scale_namespace }}"
+        field_selectors:
+          - status.phase=Running
+      register: scale_down_deploy_pods
+      until: "{{ scale_down_deploy_pods.resources | length == 0 }}"
+      retries: 6
+      delay: 5
+
+    - name: Ensure that scale down took effect
+      assert:
+        that:
+          - scale_down is changed
+          - '"duration" in scale_down'
+          - scale_down.diff
+
+    - name: Reapply the earlier deployment
+      k8s:
+        definition:
+          api_version: apps/v1
+          kind: Deployment
+          metadata:
+            name: scale-deploy
+            namespace: "{{ scale_namespace }}"
+          spec:
+            replicas: 1
+            selector:
+              matchLabels:
+                app: "{{ k8s_pod_name }}"
+            template: "{{ k8s_pod_template }}"
+        wait: yes
+        wait_timeout: 60
+        apply: yes
+      vars:
+        k8s_pod_name: scale-deploy
+        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+        k8s_pod_ports:
+          - containerPort: 8080
+            name: http
+            protocol: TCP
+      register: reapply_after_scale
+
+    - name: Get pods in scale-deploy
+      k8s_info:
+        kind: Pod
+        label_selectors:
+          - app=scale-deploy
+        namespace: "{{ scale_namespace }}"
+        field_selectors:
+          - status.phase=Running
+      register: scale_up_deploy_pods
+
+    - name: Ensure that reapply after scale worked
+      assert:
+        that:
+          - reapply_after_scale is changed
+          - scale_up_deploy_pods.resources | length == 1
+
+    - name: Scale the deployment up
+      k8s_scale:
+        api_version: apps/v1
+        kind: Deployment
+        name: scale-deploy
+        namespace: "{{ scale_namespace }}"
+        replicas: 2
+        wait: yes
+        wait_timeout: 60
+      register: scale_up
+
+    - name: Get pods in scale-deploy
+      k8s_info:
+        kind: Pod
+        label_selectors:
+          - app=scale-deploy
+        field_selectors:
+          - status.phase=Running
+        namespace: "{{ scale_namespace }}"
+      register: scale_up_further_deploy_pods
+
+    - name: Ensure that scale up worked
+      assert:
+        that:
+          - scale_up is changed
+          - '"duration" in scale_up'
+          - scale_up.diff
+          - scale_up_further_deploy_pods.resources | length == 2
+
+    - name: Don't scale the deployment up
+      k8s_scale:
+        api_version: apps/v1
+        kind: Deployment
+        name: scale-deploy
+        namespace: "{{ scale_namespace }}"
+        replicas: 2
+        wait: yes
+      register: scale_up_noop
+
+    - name: Get pods in scale-deploy
+      k8s_info:
+        kind: Pod
+        label_selectors:
+          - app=scale-deploy
+        field_selectors:
+          - status.phase=Running
+        namespace: "{{ scale_namespace }}"
+      register: scale_up_noop_pods
+
+    - name: Ensure that no-op scale up worked
+      assert:
+        that:
+          - scale_up_noop is not changed
+          - not scale_up_noop.diff
+          - scale_up_noop_pods.resources | length == 2
+          - '"duration" in scale_up_noop'
+
+    - name: Scale deployment down without wait
+      k8s_scale:
+        api_version: apps/v1
+        kind: Deployment
+        name: scale-deploy
+        namespace: "{{ scale_namespace }}"
+        replicas: 1
+        wait: no
+      register: scale_down_no_wait
+
+    - name: Ensure that scale down succeeds
+      k8s_info:
+        kind: Pod
+        label_selectors:
+          - app=scale-deploy
+        namespace: "{{ scale_namespace }}"
+      register: scale_down_no_wait_pods
+      retries: 6
+      delay: 5
+      until: "{{ scale_down_no_wait_pods.resources | length == 1 }}"
+
+    - name: Ensure that scale down without wait worked
+      assert:
+        that:
+          - scale_down_no_wait is changed
+          - scale_down_no_wait.diff
+          - scale_down_no_wait_pods.resources | length == 1
+
+  always:
+    - name: Remove namespace
+      k8s:
+        kind: Namespace
+        name: "{{ scale_namespace }}"
+        state: absent
diff --git a/molecule/default/tasks/waiter.yml b/molecule/default/tasks/waiter.yml
index 92d1adc7..5434e967 100644
--- a/molecule/default/tasks/waiter.yml
+++ b/molecule/default/tasks/waiter.yml
@@ -119,6 +119,8 @@
         namespace: "{{ wait_namespace }}"
         label_selectors:
          - app=wait-ds
+        field_selectors:
+          - status.phase=Running
       register: updated_ds_pods

     - name: Check that daemonset wait worked
@@ -242,13 +244,17 @@
         namespace: "{{ wait_namespace }}"
         label_selectors:
          - app=wait-deploy
+        field_selectors:
+          - status.phase=Running
       register: updated_deploy_pods
+      until: "{{ updated_deploy_pods.resources[0].spec.containers[0].image.endswith(':2') }}"
+      retries: 6
+      delay: 5

     - name: Check that deployment wait worked
       assert:
         that:
           - deploy.result.status.availableReplicas == deploy.result.status.replicas
-          - updated_deploy_pods.resources[0].spec.containers[0].image.endswith(":2")

     - name: Pause a deployment
       k8s:
diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py
index 7c88f5be..f9562011 100644
--- a/plugins/module_utils/common.py
+++ b/plugins/module_utils/common.py
@@ -19,7 +19,8 @@
 __metaclass__ = type

 import copy
-import json
+from datetime import datetime
+import time
 import os
 import traceback

@@ -34,7 +35,7 @@
     import kubernetes
     import openshift
     from openshift.dynamic import DynamicClient
-    from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError
+    from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError, NotFoundError
     HAS_K8S_MODULE_HELPER = True
     k8s_import_exception = None
 except ImportError as e:
@@ -291,3 +292,90 @@ def execute_module(self):

     def fail(self, msg=None):
         self.fail_json(msg=msg)
+
+    def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state):
+        start = datetime.now()
+
+        def _wait_for_elapsed():
+            return (datetime.now() - start).seconds
+
+        response = None
+        while _wait_for_elapsed() < timeout:
+            try:
+                response = resource.get(name=name, namespace=namespace)
+                if predicate(response):
+                    if response:
+                        return True, response.to_dict(), _wait_for_elapsed()
+                    else:
+                        return True, {}, _wait_for_elapsed()
+                time.sleep(sleep)
+            except NotFoundError:
+                if state == 'absent':
+                    return True, {}, _wait_for_elapsed()
+        if response:
+            response = response.to_dict()
+        return False, response, _wait_for_elapsed()
+
+    def wait(self, resource, definition, sleep, timeout, state='present', condition=None):
+
+        def _deployment_ready(deployment):
+            # FIXME: frustratingly bool(deployment.status) is True even if status is empty
+            # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
+            # deployment.status.replicas is None is perfectly ok if desired replicas == 0
+            # Scaling up means that we also need to check that we're not in a
+            # situation where status.replicas == status.availableReplicas
+            # but spec.replicas != status.replicas
+            return (deployment.status and
+                    deployment.spec.replicas == (deployment.status.replicas or 0) and
+                    deployment.status.availableReplicas == deployment.status.replicas and
+                    deployment.status.observedGeneration == deployment.metadata.generation and
+                    not deployment.status.unavailableReplicas)
+
+        def _pod_ready(pod):
+            return (pod.status and pod.status.containerStatuses is not None and
+                    all([container.ready for container in pod.status.containerStatuses]))
+
+        def _daemonset_ready(daemonset):
+            return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
+                    daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
+                    daemonset.status.observedGeneration == daemonset.metadata.generation and
+                    not daemonset.status.unavailableReplicas)
+
+        def _custom_condition(resource):
+            if not resource.status or not resource.status.conditions:
+                return False
+            match = [x for x in resource.status.conditions if x.type == condition['type']]
+            if not match:
+                return False
+            # There should never be more than one condition of a specific type
+            match = match[0]
+            if match.status == 'Unknown':
+                if match.status == condition['status']:
+                    if 'reason' not in condition:
+                        return True
+                    if condition['reason']:
+                        return match.reason == condition['reason']
+                return False
+            status = True if match.status == 'True' else False
+            if status == condition['status']:
+                if condition.get('reason'):
+                    return match.reason == condition['reason']
+                return True
+            return False
+
+        def _resource_absent(resource):
+            return not resource
+
+        waiter = dict(
+            Deployment=_deployment_ready,
+            DaemonSet=_daemonset_ready,
+            Pod=_pod_ready
+        )
+        kind = definition['kind']
+        if state == 'present' and not condition:
+            predicate = waiter.get(kind, lambda x: x)
+        elif state == 'present' and condition:
+            predicate = _custom_condition
+        else:
+            predicate = _resource_absent
+        return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)
diff --git a/plugins/module_utils/raw.py b/plugins/module_utils/raw.py
index 16dafaab..775deef7 100644
--- a/plugins/module_utils/raw.py
+++ b/plugins/module_utils/raw.py
@@ -20,9 +20,7 @@
 __metaclass__ = type

 import copy
-from datetime import datetime
 from distutils.version import LooseVersion
-import time
 import sys
 import traceback

@@ -442,85 +440,3 @@ def create_project_request(self, definition):
         result['changed'] = True
         result['method'] = 'create'
         return result
-
-    def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state):
-        start = datetime.now()
-
-        def _wait_for_elapsed():
-            return (datetime.now() - start).seconds
-
-        response = None
-        while _wait_for_elapsed() < timeout:
-            try:
-                response = resource.get(name=name, namespace=namespace)
-                if predicate(response):
-                    if response:
-                        return True, response.to_dict(), _wait_for_elapsed()
-                    else:
-                        return True, {}, _wait_for_elapsed()
-                time.sleep(sleep)
-            except NotFoundError:
-                if state == 'absent':
-                    return True, {}, _wait_for_elapsed()
-        if response:
-            response = response.to_dict()
-        return False, response, _wait_for_elapsed()
-
-    def wait(self, resource, definition, sleep, timeout, state='present', condition=None):
-
-        def _deployment_ready(deployment):
-            # FIXME: frustratingly bool(deployment.status) is True even if status is empty
-            # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
-            return (deployment.status and deployment.status.replicas is not None and
-                    deployment.status.availableReplicas == deployment.status.replicas and
-                    deployment.status.observedGeneration == deployment.metadata.generation and
-                    not deployment.status.unavailableReplicas)
-
-        def _pod_ready(pod):
-            return (pod.status and pod.status.containerStatuses is not None and
-                    all([container.ready for container in pod.status.containerStatuses]))
-
-        def _daemonset_ready(daemonset):
-            return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
-                    daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
-                    daemonset.status.observedGeneration == daemonset.metadata.generation and
-                    not daemonset.status.unavailableReplicas)
-
-        def _custom_condition(resource):
-            if not resource.status or not resource.status.conditions:
-                return False
-            match = [x for x in resource.status.conditions if x.type == condition['type']]
-            if not match:
-                return False
-            # There should never be more than one condition of a specific type
-            match = match[0]
-            if match.status == 'Unknown':
-                if match.status == condition['status']:
-                    if 'reason' not in condition:
-                        return True
-                    if condition['reason']:
-                        return match.reason == condition['reason']
-                return False
-            status = True if match.status == 'True' else False
-            if status == condition['status']:
-                if condition.get('reason'):
-                    return match.reason == condition['reason']
-                return True
-            return False
-
-        def _resource_absent(resource):
-            return not resource
-
-        waiter = dict(
-            Deployment=_deployment_ready,
-            DaemonSet=_daemonset_ready,
-            Pod=_pod_ready
-        )
-        kind = definition['kind']
-        if state == 'present' and not condition:
-            predicate = waiter.get(kind, lambda x: x)
-        elif state == 'present' and condition:
-            predicate = _custom_condition
-        else:
-            predicate = _resource_absent
-        return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)
diff --git a/plugins/module_utils/scale.py b/plugins/module_utils/scale.py
index 1c153952..27f1abe0 100644
--- a/plugins/module_utils/scale.py
+++ b/plugins/module_utils/scale.py
@@ -20,8 +20,6 @@
 __metaclass__ = type

 import copy
-import math
-import time

 from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
 from ansible_collections.community.kubernetes.plugins.module_utils.common import KubernetesAnsibleModule
@@ -29,17 +27,9 @@

 try:
     import yaml
-    from openshift.dynamic.client import ResourceInstance
     from openshift.dynamic.exceptions import NotFoundError
 except ImportError:
     pass
-try:
-    from openshift import watch
-except ImportError:
-    try:
-        from openshift.dynamic.client import watch
-    except ImportError:
-        pass


 SCALE_ARG_SPEC = {
@@ -112,7 +102,9 @@ def execute_module(self):
         wait_time = self.params.get('wait_timeout')
         existing = None
         existing_count = None
-        return_attributes = dict(changed=False, result=dict())
+        return_attributes = dict(changed=False, result=dict(), diff=dict())
+        if wait:
+            return_attributes['duration'] = 0

         resource = self.find_resource(kind, api_version, fail=True)

@@ -142,10 +134,9 @@
             if not self.check_mode:
                 if self.kind == 'job':
                     existing.spec.parallelism = replicas
-                    k8s_obj = resource.patch(existing.to_dict())
+                    return_attributes['result'] = resource.patch(existing.to_dict()).to_dict()
                 else:
-                    k8s_obj = self.scale(resource, existing, replicas, wait, wait_time)
-                return_attributes['result'] = k8s_obj.to_dict()
+                    return_attributes = self.scale(resource, existing, replicas, wait, wait_time)

         self.exit_json(**return_attributes)

@@ -161,86 +152,31 @@ def argspec(self):

     def scale(self, resource, existing_object, replicas, wait, wait_time):
         name = existing_object.metadata.name
         namespace = existing_object.metadata.namespace
+        kind = existing_object.kind

         if not hasattr(resource, 'scale'):
             self.fail_json(
                 msg="Cannot perform scale on resource of kind {0}".format(resource.kind)
             )

-        scale_obj = {'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}
+        scale_obj = {'kind': kind, 'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}

-        return_obj = None
-        stream = None
-
-        if wait:
-            w, stream = self._create_stream(resource, namespace, wait_time)
+        existing = resource.get(name=name, namespace=namespace)

         try:
             resource.scale.patch(body=scale_obj)
         except Exception as exc:
-            self.fail_json(
-                msg="Scale request failed: {0}".format(exc)
-            )
+            self.fail_json(msg="Scale request failed: {0}".format(exc))

-        if wait and stream is not None:
-            return_obj = self._read_stream(resource, w, stream, name, replicas)
-
-        if not return_obj:
-            return_obj = self._wait_for_response(resource, name, namespace)
-
-        return return_obj
-
-    def _create_stream(self, resource, namespace, wait_time):
-        """ Create a stream of events for the object """
-        w = None
-        stream = None
-        w = watch.Watch()
-        w._api_client = self.client.client
-        if namespace:
-            stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time)
-        else:
-            stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time)
-        return w, stream
-
-    def _read_stream(self, resource, watcher, stream, name, replicas):
-        """ Wait for ready_replicas to equal the requested number of replicas. """
-        return_obj = None
-        try:
-            for event in stream:
-                if event.get('object'):
-                    obj = ResourceInstance(resource, event['object'])
-                    if obj.metadata.name == name and hasattr(obj, 'status'):
-                        if replicas == 0:
-                            if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas:
-                                return_obj = obj
-                                watcher.stop()
-                                break
-                        if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas:
-                            return_obj = obj
-                            watcher.stop()
-                            break
-        except Exception as exc:
-            self.fail_json(msg="Exception reading event stream: {0}".format(exc))
-
-        if not return_obj:
-            self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
-        if replicas and return_obj.status.readyReplicas is None:
-            self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
-        if replicas and return_obj.status.readyReplicas != replicas:
-            self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
-                               "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
-        return return_obj
-
-    def _wait_for_response(self, resource, name, namespace):
-        """ Wait for an API response """
-        tries = 0
-        half = math.ceil(20 / 2)
-        obj = None
-
-        while tries <= half:
-            obj = resource.get(name=name, namespace=namespace)
-            if obj:
-                break
-            tries += 2
-            time.sleep(2)
-        return obj
+        k8s_obj = resource.get(name=name, namespace=namespace).to_dict()
+        match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
+        result = dict()
+        result['result'] = k8s_obj
+        result['changed'] = not match
+        result['diff'] = diffs
+
+        if wait:
+            success, result['result'], result['duration'] = self.wait(resource, scale_obj, 5, wait_time)
+            if not success:
+                self.fail_json(msg="Resource scaling timed out", **result)
+        return result
diff --git a/plugins/modules/k8s_scale.py b/plugins/modules/k8s_scale.py
index 600ce3cc..cf3a0c39 100644
--- a/plugins/modules/k8s_scale.py
+++ b/plugins/modules/k8s_scale.py
@@ -111,6 +111,11 @@
       description: Current status details for the object.
       returned: success
       type: complex
+    duration:
+      description: elapsed time of task in seconds
+      returned: when C(wait) is true
+      type: int
+      sample: 48
 '''

 from ansible_collections.community.kubernetes.plugins.module_utils.scale import KubernetesAnsibleScaleModule
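
Usage note (outside the patch): a minimal, illustrative sketch of how the reworked k8s_scale wait behaviour above might be exercised from a playbook. The deployment name, namespace, and registered variable are assumptions for the example only; duration and diff are the return values this change introduces, while wait and wait_timeout are existing module options.

# Illustrative only, not part of the diff above.
- name: Scale an existing deployment and wait for it to settle
  community.kubernetes.k8s_scale:
    api_version: apps/v1
    kind: Deployment
    name: example-deploy        # assumed name
    namespace: example          # assumed namespace
    replicas: 1
    wait: yes
    wait_timeout: 120
  register: scale_result

- name: Show the new return values added by this change
  debug:
    msg: "changed={{ scale_result.changed }}, took {{ scale_result.duration }}s, diff={{ scale_result.diff | default({}) }}"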